Re: [PATCH v2 29/37] powerpc/nohash: Replace pte_user() by pte_read()

From: Aneesh Kumar K.V
Date: Tue Nov 07 2023 - 08:34:45 EST


Christophe Leroy <christophe.leroy@xxxxxxxxxx> writes:

> Le 31/10/2023 à 11:15, Aneesh Kumar K.V a écrit :
>> Christophe Leroy <christophe.leroy@xxxxxxxxxx> writes:
>>
>>> pte_user() is now only used in pte_access_permitted() to check
>>> access on vmas. User flag is cleared to make a page unreadable.
>>>
>>> So rename it pte_read() and remove pte_user() which isn't used
>>> anymore.
>>>
>>> For the time being it checks _PAGE_USER, but in the near future
>>> all platforms will be converted to _PAGE_READ, so let's support
>>> both for now.
>>>
>>> Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
>>> ---
>>> arch/powerpc/include/asm/nohash/32/pte-8xx.h | 7 -------
>>> arch/powerpc/include/asm/nohash/pgtable.h | 13 +++++++------
>>> arch/powerpc/mm/ioremap.c | 4 ----
>>> 3 files changed, 7 insertions(+), 17 deletions(-)
>>>
>>> diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>> index 62c965a4511a..1ee38befd29a 100644
>>> --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>> +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
>>> @@ -112,13 +112,6 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)
>>>
>>> #define pte_mkwrite_novma pte_mkwrite_novma
>>>
>>> -static inline bool pte_user(pte_t pte)
>>> -{
>>> - return !(pte_val(pte) & _PAGE_SH);
>>> -}
>>> -
>>> -#define pte_user pte_user
>>> -
>>> static inline pte_t pte_mkhuge(pte_t pte)
>>> {
>>> return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
>>> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
>>> index ee677162f9e6..aba56fe3b1c6 100644
>>> --- a/arch/powerpc/include/asm/nohash/pgtable.h
>>> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
>>> @@ -160,9 +160,6 @@ static inline int pte_write(pte_t pte)
>>> return pte_val(pte) & _PAGE_WRITE;
>>> }
>>> #endif
>>> -#ifndef pte_read
>>> -static inline int pte_read(pte_t pte) { return 1; }
>>> -#endif
>>> static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
>>> static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
>>> static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
>>> @@ -190,10 +187,14 @@ static inline int pte_young(pte_t pte)
>>> * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
>>> * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
>>> */
>>> -#ifndef pte_user
>>> -static inline bool pte_user(pte_t pte)
>>> +#ifndef pte_read
>>> +static inline bool pte_read(pte_t pte)
>>> {
>>> +#ifdef _PAGE_READ
>>> + return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
>>> +#else
>>> return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
>>> +#endif
>>> }
>>> #endif
>>>
>>> @@ -208,7 +209,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
>>> * A read-only access is controlled by _PAGE_USER bit.
>>> * We have _PAGE_READ set for WRITE and EXECUTE
>>> */
>>> - if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
>>> + if (!pte_present(pte) || !pte_read(pte))
>>> return false;
>>>
>>> if (write && !pte_write(pte))
>>> diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
>>> index 7823c38f09de..7b0afcabd89f 100644
>>> --- a/arch/powerpc/mm/ioremap.c
>>> +++ b/arch/powerpc/mm/ioremap.c
>>> @@ -50,10 +50,6 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
>>> if (pte_write(pte))
>>> pte = pte_mkdirty(pte);
>>>
>>> - /* we don't want to let _PAGE_USER leak out */
>>> - if (WARN_ON(pte_user(pte)))
>>> - return NULL;
>>>
>>
>> This check is still valid, right? I understand that we want to remove
>> _PAGE_USER, but is losing this check OK?
>
> Well, we may have to think about it for book3s/64. For all others
> _PAGE_USER is gone and replaced by a check of addresses versus TASK_SIZE.
>
> As ioremap() will map into vmalloc space, that address is necessarily
> correct.
>
>
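
If I follow, on those platforms whether a mapping is user-accessible is
now decided by the address itself rather than by a PTE bit, roughly
like the below (a sketch of the idea only, not code from this series):

	/*
	 * Sketch: with _PAGE_USER gone, "user or kernel?" is answered by
	 * the virtual address being below TASK_SIZE, so an ioremap() into
	 * vmalloc space can never appear user-accessible.
	 */
	static inline bool addr_is_user(unsigned long addr)
	{
		return addr < TASK_SIZE;
	}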

But the check we are losing in ioremap_prot() is on the pte flags, not on the mapped address. Something like this to keep it?

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 7c7de7b56df0..b053b86e0069 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1462,5 +1462,11 @@ static inline bool pud_is_leaf(pud_t pud)
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}

+#define arch_ioremap_valid_pte arch_ioremap_valid_pte
+static inline bool arch_ioremap_valid_pte(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 137dc3c84e45..7b23d2543528 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -223,5 +223,11 @@ static inline pte_t ptep_get(pte_t *ptep)

#endif

+#define arch_ioremap_valid_pte arch_ioremap_valid_pte
+static inline bool arch_ioremap_valid_pte(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_SH);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h
index f516f0b5b7a8..d31274178aa6 100644
--- a/arch/powerpc/include/asm/nohash/pte-e500.h
+++ b/arch/powerpc/include/asm/nohash/pte-e500.h
@@ -105,6 +105,12 @@ static inline pte_t pte_mkexec(pte_t pte)
}
#define pte_mkexec pte_mkexec

+#define arch_ioremap_valid_pte arch_ioremap_valid_pte
+static inline bool arch_ioremap_valid_pte(pte_t pte)
+{
+ return !(pte_val(pte) & (_PAGE_BAP_UR | _PAGE_BAP_UW | _PAGE_BAP_UX));
+}
+
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2bfb7dd3b49e..417abe5dcbd8 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -231,6 +231,13 @@ static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)

#endif /* CONFIG_PPC64 */

+#ifndef arch_ioremap_valid_pte
+static inline bool arch_ioremap_valid_pte(pte_t pte)
+{
+ return true;
+}
+#endif
+
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 7b0afcabd89f..1a39e698c3d4 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -50,6 +50,9 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
if (pte_write(pte))
pte = pte_mkdirty(pte);

+ if (WARN_ON(!arch_ioremap_valid_pte(pte)))
+ return NULL;
+
if (iowa_is_active())
return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
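
With a hook along those lines (arch_ioremap_valid_pte is only a name I
made up for the sketch), a caller handing user-accessible protections
to ioremap_prot() would still trip the WARN_ON instead of silently
getting a user-readable IO mapping, e.g. on book3s/64:

	/*
	 * Hypothetical misuse the WARN_ON above is meant to catch:
	 * PAGE_SHARED carries no _PAGE_PRIVILEGED, so the hook returns false.
	 */
	void __iomem *p = ioremap_prot(paddr, size, pgprot_val(PAGE_SHARED));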