[PATCH v2 2/2] x86/kprobes: Fix optprobe optimization check with CONFIG_RETHUNK

From: Masami Hiramatsu (Google)
Date: Wed Sep 07 2022 - 21:35:17 EST


From: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>

Since CONFIG_RETHUNK and CONFIG_SLS use INT3 to stop speculative
execution after a function return, kprobe jump optimization always
fails on functions that contain such an INT3 inside the function body.
(The INT3 padding between functions is already checked, but INT3 inside
the function body is not.)

To avoid this issue, as is done for kprobes, decode all instructions
in the function to check whether the kprobe can be optimized.

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Fixes: e463a09af2f0 ("x86: Add straight-line-speculation mitigation")
Cc: stable@xxxxxxxxxxxxxxx
---
Changes in v2:
- Reuse the kprobes decoding function.
---
arch/x86/kernel/kprobes/opt.c | 73 +++++++++++++----------------------------
1 file changed, 24 insertions(+), 49 deletions(-)

diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 2e41850cab06..14f8d2c6630a 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -260,25 +260,36 @@ static int insn_is_indirect_jump(struct insn *insn)
return ret;
}

-static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
+static int optimize_check_cb(struct insn *insn, void *data)
{
- unsigned char ops;
+ unsigned long paddr = (unsigned long)data;

- for (; addr < eaddr; addr++) {
- if (get_kernel_nofault(ops, (void *)addr) < 0 ||
- ops != INT3_INSN_OPCODE)
- return false;
- }
+ if (search_exception_tables((unsigned long)insn->kaddr))
+ /*
+ * Since some fixup code jumps into this function,
+ * we can't optimize a kprobe in this function.
+ */
+ return 1;
+
+ /* Check that no instruction jumps into the target */
+ if (insn_is_indirect_jump(insn) ||
+ insn_jump_into_range(insn, paddr + INT3_INSN_SIZE,
+ DISP32_SIZE))
+ return 1;
+
+ /* Check whether an INT3 lies in the range to be replaced by the jump. */
+ if (insn->opcode.bytes[0] == INT3_INSN_OPCODE &&
+ paddr <= (unsigned long)insn->kaddr &&
+ (unsigned long)insn->kaddr <= paddr + JMP32_INSN_SIZE)
+ return 1;

- return true;
+ return 0;
}

/* Decode whole function to ensure any instructions don't jump into target */
static int can_optimize(unsigned long paddr)
{
- unsigned long addr, size = 0, offset = 0;
- struct insn insn;
- kprobe_opcode_t buf[MAX_INSN_SIZE];
+ unsigned long size = 0, offset = 0;

/* Lookup symbol including addr */
if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
@@ -296,44 +307,8 @@ static int can_optimize(unsigned long paddr)
if (size - offset < JMP32_INSN_SIZE)
return 0;

- /* Decode instructions */
- addr = paddr - offset;
- while (addr < paddr - offset + size) { /* Decode until function end */
- unsigned long recovered_insn;
- int ret;
-
- if (search_exception_tables(addr))
- /*
- * Since some fixup code will jumps into this function,
- * we can't optimize kprobe in this function.
- */
- return 0;
- recovered_insn = recover_probed_instruction(buf, addr);
- if (!recovered_insn)
- return 0;
-
- ret = insn_decode_kernel(&insn, (void *)recovered_insn);
- if (ret < 0)
- return 0;
-
- /*
- * In the case of detecting unknown breakpoint, this could be
- * a padding INT3 between functions. Let's check that all the
- * rest of the bytes are also INT3.
- */
- if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
- return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
-
- /* Recover address */
- insn.kaddr = (void *)addr;
- insn.next_byte = (void *)(addr + insn.length);
- /* Check any instructions don't jump into target */
- if (insn_is_indirect_jump(&insn) ||
- insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
- DISP32_SIZE))
- return 0;
- addr += insn.length;
- }
+ if (every_insn_in_func(paddr, optimize_check_cb, (void *)paddr))
+ return 0;

return 1;
}