[PATCH 3.2 079/115] x86: standardize mmap_rnd() usage

From: Ben Hutchings
Date: Tue Apr 26 2016 - 19:21:49 EST


3.2.80-rc1 review patch. If anyone has any objections, please let me know.

------------------

From: Kees Cook <keescook@xxxxxxxxxxxx>

commit 82168140bc4cec7ec9bad39705518541149ff8b7 upstream.

In preparation for splitting out ET_DYN ASLR, this refactors mmap_rnd()
so it is used the same way as on arm, and extracts the PF_RANDOMIZE
check into the caller.

Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
Reviewed-by: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
---
arch/x86/mm/mmap.c | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)
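
For quick review, here is a hypothetical, userspace-only sketch of the
control flow the diff below arrives at: arch_pick_mmap_layout() becomes
the single place that checks PF_RANDOMIZE, computes the random factor
once, and hands it to both mmap_legacy_base() and mmap_base(). All
names ending in _sketch, the constants, and rand() are illustrative
stand-ins, not kernel interfaces.

/*
 * Hypothetical userspace sketch of the refactored flow (not kernel
 * code): the caller makes the PF_RANDOMIZE decision, computes the
 * random offset once, and passes it to both base calculations.
 * Constants and rand() are simplified stand-ins for PAGE_SHIFT,
 * TASK_SIZE, TASK_UNMAPPED_BASE and get_random_int().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT_SKETCH     12
#define TASK_SIZE_SKETCH      (1UL << 30)  /* toy value, not the real TASK_SIZE */
#define UNMAPPED_BASE_SKETCH  (TASK_SIZE_SKETCH / 3)
#define STACK_GAP_SKETCH      (8UL << 20)  /* 8 MiB stand-in for the stack gap */

static unsigned long mmap_rnd_sketch(bool is_ia32)
{
	/* 8 bits of randomness for 32-bit mmaps, 28 bits for 64-bit ones */
	unsigned long rnd = is_ia32 ? (unsigned long)rand() % (1 << 8)
	                            : (unsigned long)rand() % (1 << 28);
	return rnd << PAGE_SHIFT_SKETCH;
}

static unsigned long mmap_base_sketch(unsigned long rnd)
{
	/* top-down base: below the stack gap, pulled down further by rnd */
	return TASK_SIZE_SKETCH - STACK_GAP_SKETCH - rnd;
}

static unsigned long mmap_legacy_base_sketch(unsigned long rnd)
{
	/* bottom-up (legacy) base: fixed base pushed up by rnd */
	return UNMAPPED_BASE_SKETCH + rnd;
}

int main(void)
{
	bool randomize = true;            /* plays the role of PF_RANDOMIZE */
	unsigned long random_factor = 0UL;

	srand((unsigned)time(NULL));
	if (randomize)
		random_factor = mmap_rnd_sketch(true); /* 8-bit case keeps the toy numbers small */

	printf("legacy (bottom-up) base: %#lx\n", mmap_legacy_base_sketch(random_factor));
	printf("top-down base:           %#lx\n", mmap_base_sketch(random_factor));
	return 0;
}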

--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -67,22 +67,21 @@ static int mmap_is_legacy(void)

static unsigned long mmap_rnd(void)
{
- unsigned long rnd = 0;
+ unsigned long rnd;

/*
- * 8 bits of randomness in 32bit mmaps, 20 address space bits
- * 28 bits of randomness in 64bit mmaps, 40 address space bits
- */
- if (current->flags & PF_RANDOMIZE) {
- if (mmap_is_ia32())
- rnd = get_random_int() % (1<<8);
- else
- rnd = get_random_int() % (1<<28);
- }
+ * 8 bits of randomness in 32bit mmaps, 20 address space bits
+ * 28 bits of randomness in 64bit mmaps, 40 address space bits
+ */
+ if (mmap_is_ia32())
+ rnd = (unsigned long)get_random_int() % (1<<8);
+ else
+ rnd = (unsigned long)get_random_int() % (1<<28);
+
return rnd << PAGE_SHIFT;
}

-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);

@@ -91,19 +90,19 @@ static unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;

- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+ return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

/*
* Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
* does, but not when emulating X86_32
*/
-static unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(unsigned long rnd)
{
if (mmap_is_ia32())
return TASK_UNMAPPED_BASE;
else
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return TASK_UNMAPPED_BASE + rnd;
}

/*
@@ -112,14 +111,19 @@ static unsigned long mmap_legacy_base(vo
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
- mm->mmap_legacy_base = mmap_legacy_base();
- mm->mmap_base = mmap_base();
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = mmap_rnd();
+
+ mm->mmap_legacy_base = mmap_legacy_base(random_factor);

if (mmap_is_legacy()) {
mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}