[PATCH v2] x86/boot: Rename overlapping memcpy() to memmove()

From: Kees Cook
Date: Tue Apr 26 2016 - 17:46:26 EST


Instead of giving memcpy() non-standard overlap-safe behavior, explicitly
provide memmove(), make it available to the decompressors, and switch the
two overlapping-copy cases (screen scrolling and ELF parsing) to use
memmove(). Additionally, document the purpose of compressed/string.c.

Suggested-by: Lasse Collin <lasse.collin@xxxxxxxxxxx>
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
---
Applies on top of tip:x86/boot
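
For reviewers who want to see the overlap case in isolation: the scroll copy
moves rows within a single buffer, so source and destination overlap, and
memcpy() makes no promise for overlapping regions; memmove() is the primitive
defined for that case. Below is a minimal standalone sketch, illustration
only and not part of the patch (the 4x8 simulated screen buffer and the
LINES/COLS names are made up for the demo):

#include <stdio.h>
#include <string.h>

#define LINES 4
#define COLS  8

int main(void)
{
	char screen[LINES][COLS + 1];
	int i;

	/* Give each row a recognizable pattern: row 0 is all '0's, etc. */
	for (i = 0; i < LINES; i++) {
		memset(screen[i], '0' + i, COLS);
		screen[i][COLS] = '\0';
	}

	/*
	 * Scroll up by one row: rows 1..LINES-1 are copied over rows
	 * 0..LINES-2.  The regions overlap within the same array, so
	 * memmove() is the correct call here.
	 */
	memmove(screen[0], screen[1], (size_t)(LINES - 1) * (COLS + 1));

	/* Blank the last row, as scroll() does with spaces. */
	memset(screen[LINES - 1], ' ', COLS);

	for (i = 0; i < LINES; i++)
		printf("%s\n", screen[i]);

	return 0;
}

The same reasoning covers parse_elf(): the ELF segments are moved around
within the decompressed output buffer, so those copies can overlap as well.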
---
arch/x86/boot/compressed/misc.c | 6 ++++--
arch/x86/boot/compressed/string.c | 19 +++++++++++--------
2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index c57d785ff955..6dde6ccdf00e 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -32,9 +32,11 @@
#undef memcpy
#undef memset
#define memzero(s, n) memset((s), 0, (n))
+#define memmove memmove

/* Functions used by the included decompressor code below. */
static void error(char *m);
+void *memmove(void *dest, const void *src, size_t n);

/*
* This is set up by the setup-routine at boot-time
@@ -80,7 +82,7 @@ static void scroll(void)
{
int i;

- memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
+ memmove(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
vidmem[i] = ' ';
}
@@ -307,7 +309,7 @@ static void parse_elf(void *output)
#else
dest = (void *)(phdr->p_paddr);
#endif
- memcpy(dest, output + phdr->p_offset, phdr->p_filesz);
+ memmove(dest, output + phdr->p_offset, phdr->p_filesz);
break;
default: /* Ignore other PT_* */ break;
}
diff --git a/arch/x86/boot/compressed/string.c b/arch/x86/boot/compressed/string.c
index 1e10e40f49dd..2befeca1aada 100644
--- a/arch/x86/boot/compressed/string.c
+++ b/arch/x86/boot/compressed/string.c
@@ -1,7 +1,14 @@
+/*
+ * This provides an optimized implementation of memcpy, and a simplified
+ * implementation of memset and memmove. These are used here because the
+ * standard kernel runtime versions are not yet available and we don't
+ * trust the gcc built-in implementations as they may do unexpected things
+ * (e.g. FPU ops) in the minimal decompression stub execution environment.
+ */
#include "../string.c"

#ifdef CONFIG_X86_32
-void *__memcpy(void *dest, const void *src, size_t n)
+void *memcpy(void *dest, const void *src, size_t n)
{
int d0, d1, d2;
asm volatile(
@@ -15,7 +22,7 @@ void *__memcpy(void *dest, const void *src, size_t n)
return dest;
}
#else
-void *__memcpy(void *dest, const void *src, size_t n)
+void *memcpy(void *dest, const void *src, size_t n)
{
long d0, d1, d2;
asm volatile(
@@ -40,17 +47,13 @@ void *memset(void *s, int c, size_t n)
return s;
}

-/*
- * This memcpy is overlap safe (i.e. it is memmove without conflicting
- * with other definitions of memmove from the various decompressors.
- */
-void *memcpy(void *dest, const void *src, size_t n)
+void *memmove(void *dest, const void *src, size_t n)
{
unsigned char *d = dest;
const unsigned char *s = src;

if (d <= s || d - s >= n)
- return __memcpy(dest, src, n);
+ return memcpy(dest, src, n);

while (n-- > 0)
d[n] = s[n];
--
2.6.3

--
Kees Cook
Chrome OS & Brillo Security