Re: [PATCH v3 07/11] arm: Use generic mmap top-down layout

From: Kees Cook
Date: Thu Apr 18 2019 - 01:51:34 EST


On Wed, Apr 17, 2019 at 12:30 AM Alexandre Ghiti <alex@xxxxxxxx> wrote:
>
> arm uses a top-down mmap layout by default that exactly fits the generic
> functions, so get rid of the arch-specific code and use the generic version
> by selecting ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT.
>
> Signed-off-by: Alexandre Ghiti <alex@xxxxxxxx>

Acked-by: Kees Cook <keescook@xxxxxxxxxxxx>
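
For reference, the generic arch_pick_mmap_layout() selected via
ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT mirrors the arm code being removed
below almost line for line, which is why this can be a drop-in change.
Roughly (just a sketch, assuming the generic mmap_is_legacy(),
mmap_base(), and arch_mmap_rnd() helpers from earlier in this series
keep the same behavior):

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        /* Only randomize the base when the task allows it. */
        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        if (mmap_is_legacy(rlim_stack)) {
                /* Legacy layout: bottom-up from TASK_UNMAPPED_BASE. */
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                /* Default layout: top-down from just below the stack gap. */
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}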

-Kees

> ---
> arch/arm/Kconfig                 |  1 +
> arch/arm/include/asm/processor.h |  2 --
> arch/arm/mm/mmap.c               | 62 --------------------------------
> 3 files changed, 1 insertion(+), 64 deletions(-)
>
> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> index 850b4805e2d1..f8f603da181f 100644
> --- a/arch/arm/Kconfig
> +++ b/arch/arm/Kconfig
> @@ -28,6 +28,7 @@ config ARM
> select ARCH_SUPPORTS_ATOMIC_RMW
> select ARCH_USE_BUILTIN_BSWAP
> select ARCH_USE_CMPXCHG_LOCKREF
> + select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
> select ARCH_WANT_IPC_PARSE_VERSION
> select BUILDTIME_EXTABLE_SORT if MMU
> select CLONE_BACKWARDS
> diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
> index 57fe73ea0f72..944ef1fb1237 100644
> --- a/arch/arm/include/asm/processor.h
> +++ b/arch/arm/include/asm/processor.h
> @@ -143,8 +143,6 @@ static inline void prefetchw(const void *ptr)
> #endif
> #endif
>
> -#define HAVE_ARCH_PICK_MMAP_LAYOUT
> -
> #endif
>
> #endif /* __ASM_ARM_PROCESSOR_H */
> diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
> index 0b94b674aa91..b8d912ac9e61 100644
> --- a/arch/arm/mm/mmap.c
> +++ b/arch/arm/mm/mmap.c
> @@ -17,43 +17,6 @@
>         ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
>          (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
>
> -/* gap between mmap and stack */
> -#define MIN_GAP (128*1024*1024UL)
> -#define MAX_GAP ((STACK_TOP)/6*5)
> -#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
> -
> -static int mmap_is_legacy(struct rlimit *rlim_stack)
> -{
> -        if (current->personality & ADDR_COMPAT_LAYOUT)
> -                return 1;
> -
> -        if (rlim_stack->rlim_cur == RLIM_INFINITY)
> -                return 1;
> -
> -        return sysctl_legacy_va_layout;
> -}
> -
> -static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
> -{
> -        unsigned long gap = rlim_stack->rlim_cur;
> -        unsigned long pad = stack_guard_gap;
> -
> -        /* Account for stack randomization if necessary */
> -        if (current->flags & PF_RANDOMIZE)
> -                pad += (STACK_RND_MASK << PAGE_SHIFT);
> -
> -        /* Values close to RLIM_INFINITY can overflow. */
> -        if (gap + pad > gap)
> -                gap += pad;
> -
> -        if (gap < MIN_GAP)
> -                gap = MIN_GAP;
> -        else if (gap > MAX_GAP)
> -                gap = MAX_GAP;
> -
> -        return PAGE_ALIGN(STACK_TOP - gap - rnd);
> -}
> -
> /*
> * We need to ensure that shared mappings are correctly aligned to
> * avoid aliasing issues with VIPT caches. We need to ensure that
> @@ -181,31 +144,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
>         return addr;
> }
>
> -unsigned long arch_mmap_rnd(void)
> -{
> -        unsigned long rnd;
> -
> -        rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
> -
> -        return rnd << PAGE_SHIFT;
> -}
> -
> -void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
> -{
> -        unsigned long random_factor = 0UL;
> -
> -        if (current->flags & PF_RANDOMIZE)
> -                random_factor = arch_mmap_rnd();
> -
> -        if (mmap_is_legacy(rlim_stack)) {
> -                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
> -                mm->get_unmapped_area = arch_get_unmapped_area;
> -        } else {
> -                mm->mmap_base = mmap_base(random_factor, rlim_stack);
> -                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
> -        }
> -}
> -
> /*
> * You really shouldn't be using read() or write() on /dev/mem. This
> * might go away in the future.
> --
> 2.20.1
>


--
Kees Cook