[RFC][PATCH 15/17] x86/cpu: Rename srso_(.*)_alias to srso_alias_\1

From: Peter Zijlstra
Date: Wed Aug 09 2023 - 03:27:06 EST


For a more consistent namespace.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
 arch/x86/include/asm/nospec-branch.h |  2 +-
 arch/x86/kernel/vmlinux.lds.S        |  8 ++++----
 arch/x86/lib/retpoline.S             | 24 ++++++++++++------------
 3 files changed, 17 insertions(+), 17 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -346,7 +346,7 @@ extern void srso_alias_return_thunk(void

extern void zen_untrain_ret(void);
extern void srso_untrain_ret(void);
-extern void srso_untrain_ret_alias(void);
+extern void srso_alias_untrain_ret(void);

extern void entry_untrain_ret(void);
extern void entry_ibpb(void);
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -149,10 +149,10 @@ SECTIONS

#ifdef CONFIG_CPU_UNRET_ENTRY
/*
- * See the comment above srso_untrain_ret_alias()'s
+ * See the comment above srso_alias_untrain_ret()'s
* definition.
*/
- . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+ . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
*(.text.__x86.rethunk_safe)
#endif
ALIGN_ENTRY_TEXT_END
@@ -529,8 +529,8 @@ INIT_PER_CPU(irq_stack_backing_store);
* GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
* of the two function addresses:
*/
-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
- (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((srso_alias_untrain_ret | srso_alias_safe_ret) -
+ (srso_alias_untrain_ret & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
"SRSO function pair won't alias");
#endif

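[ The ASSERT just above works around GNU ld's missing XOR operator with
  the identity (A | B) - (A & B) == A ^ B, which holds for any A and B
  because A | B == (A ^ B) + (A & B). A minimal user-space C sketch of
  the same check; the link addresses are invented for illustration, and
  SRSO_BIT_MASK is just a local name here -- only the 2M alignment and
  the bit pattern come from the linker script:

#include <assert.h>
#include <stdint.h>

#define SRSO_BIT_MASK ((1UL << 2) | (1UL << 8) | (1UL << 14) | (1UL << 20))

int main(void)
{
	uint64_t untrain = 0xffffffff82800000UL;	/* 2M aligned */
	uint64_t safe    = untrain | SRSO_BIT_MASK;	/* bits 2,8,14,20 set */

	/* What the ASSERT computes ... */
	assert(((untrain | safe) - (untrain & safe)) == SRSO_BIT_MASK);
	/* ... is exactly the XOR of the two addresses. */
	assert((untrain ^ safe) == ((untrain | safe) - (untrain & safe)));

	return 0;
}
]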
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -134,41 +134,41 @@ SYM_CODE_END(__x86_indirect_jump_thunk_a

#ifdef CONFIG_CPU_UNRET_ENTRY
/*
- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
* special addresses:
*
- * - srso_untrain_ret_alias() is 2M aligned
- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
+ * - srso_alias_untrain_ret() is 2M aligned
+ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
* and 20 in its virtual address are set (while those bits in the
- * srso_untrain_ret_alias() function are cleared).
+ * srso_alias_untrain_ret() function are cleared).
*
* This guarantees that those two addresses will alias in the branch
* target buffer of Zen3/4 generations, leading to any potential
* poisoned entries at that BTB slot to get evicted.
*
- * As a result, srso_safe_ret_alias() becomes a safe return.
+ * As a result, srso_alias_safe_ret() becomes a safe return.
*/
.section .text.__x86.rethunk_untrain

-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR
ASM_NOP2
lfence
jmp srso_alias_return_thunk
-SYM_FUNC_END(srso_untrain_ret_alias)
-__EXPORT_THUNK(srso_untrain_ret_alias)
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)

.section .text.__x86.rethunk_safe

/* Needs a definition for the zen_return_thunk alternative below. */
-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
add $8, %_ASM_SP
UNWIND_HINT_FUNC
ANNOTATE_UNRET_SAFE
ret
int3
-SYM_FUNC_END(srso_safe_ret_alias)
+SYM_FUNC_END(srso_alias_safe_ret)

.section .text.__x86.return_thunk

@@ -266,7 +266,7 @@ __EXPORT_THUNK(srso_untrain_ret)
SYM_FUNC_START(entry_untrain_ret)
ALTERNATIVE_2 "jmp zen_untrain_ret", \
"jmp srso_untrain_ret", X86_FEATURE_SRSO, \
- "jmp srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+ "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)

@@ -284,7 +284,7 @@ SYM_CODE_END(srso_return_thunk)
SYM_CODE_START(srso_alias_return_thunk)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR
- call srso_safe_ret_alias
+ call srso_alias_safe_ret
ud2
SYM_CODE_END(srso_alias_return_thunk)
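
[ For completeness, a rough user-space model of the aliasing property
  the renamed srso_alias_* pair relies on, per the comment moved above.
  The btb_slot() function is a deliberately simplified stand-in that
  assumes only that the Zen3/4 BTB indexing disregards bits 2, 8, 14
  and 20 of the virtual address; the real hash is not modelled, and the
  addresses are invented:

#include <assert.h>
#include <stdint.h>

#define SRSO_BIT_MASK ((1UL << 2) | (1UL << 8) | (1UL << 14) | (1UL << 20))

/* Hypothetical model: a BTB slot that is blind to the masked bits. */
static uint64_t btb_slot(uint64_t va)
{
	return va & ~SRSO_BIT_MASK;
}

int main(void)
{
	/* srso_alias_untrain_ret: 2M aligned, so the mask bits are clear. */
	uint64_t untrain = 0xffffffff82800000UL;
	/* srso_alias_safe_ret: same 2M page, mask bits set. */
	uint64_t safe = untrain | SRSO_BIT_MASK;

	/* Same 2M page ... */
	assert((untrain >> 21) == (safe >> 21));
	/* ... and, in this model, the same BTB slot, so the untrain
	 * sequence evicts whatever poison the safe ret would hit. */
	assert(btb_slot(untrain) == btb_slot(safe));

	return 0;
}
]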