[PATCH 01/24] Rename .data.cacheline_aligned to .data..cacheline_aligned.

From: Denys Vlasenko
Date: Fri Feb 19 2010 - 19:08:36 EST


From: Tim Abbott <tabbott@xxxxxxxxxxx>

The double dot keeps kernel-defined section names out of the namespace gcc
uses for its own generated ".data.*" sections (e.g. when building with
-ffunction-sections/-fdata-sections), so the two can never collide.

Signed-off-by: Tim Abbott <tabbott@xxxxxxxxxxx>
Cc: Sam Ravnborg <sam@xxxxxxxxxxxx>
Signed-off-by: Denys Vlasenko <vda.linux@xxxxxxxxxxxxxx>
---
 arch/powerpc/kernel/vmlinux.lds.S |    2 +-
 arch/x86/kernel/init_task.c       |    2 +-
 include/asm-generic/vmlinux.lds.h |    2 +-
 include/linux/cache.h             |    2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
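
Not part of the patch, just some context for reviewers: a minimal sketch of
how a variable marked with the generic __cacheline_aligned from
<linux/cache.h> ends up in the renamed section (the struct and its name
below are made up for illustration):

	#include <linux/cache.h>

	/* Aligned to SMP_CACHE_BYTES and emitted into the
	 * .data..cacheline_aligned input section, which the linker
	 * script's CACHELINE_ALIGNED_DATA() macro then collects. */
	static struct example_stats {
		unsigned long hits;
		unsigned long misses;
	} example_stats __cacheline_aligned;

After a build, "readelf -S" on any object that defines such a variable
should show the new .data..cacheline_aligned section name.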

diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index dcd01c8..3229c06 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -231,7 +231,7 @@ SECTIONS
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}

-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}

diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb..43e9ccf 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e6520..78450aa 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -189,7 +189,7 @@

 #define CACHELINE_ALIGNED_DATA(align) \
 	. = ALIGN(align); \
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)

 #define INIT_TASK_DATA(align) \
 	. = ALIGN(align); \
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e2488..4c57065 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned \
   __attribute__((__aligned__(SMP_CACHE_BYTES), \
-		 __section__(".data.cacheline_aligned")))
+		 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */

 #ifndef __cacheline_aligned_in_smp
--
1.6.2.4
