[PATCH bpf-next v4 3/4] riscv: implement a memset like function for text

From: Puranjay Mohan
Date: Thu Aug 31 2023 - 09:12:46 EST


The BPF JIT needs to write invalid instructions into RX (read-execute) regions
of memory to invalidate removed BPF programs. This requires a memset()-like
function that can work on RX memory.

Implement patch_text_set_nosync(), which is similar to text_poke_set() on
x86.
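
For illustration, a caller could use it to fill a freed JIT region with an
illegal pattern roughly as follows. This is only a sketch: the function name
invalidate_jit_region() is hypothetical, and the fill byte of 0 is merely
illustrative (an all-zero word decodes as an illegal instruction on RISC-V);
the actual wiring into the BPF JIT is outside this patch.

  #include <linux/types.h>
  #include <linux/memory.h>   /* text_mutex */
  #include <linux/mutex.h>
  #include <asm/patch.h>

  static int invalidate_jit_region(void *dst, size_t len)
  {
          int ret;

          /* patch_text_set_nosync() expects the caller to hold text_mutex */
          mutex_lock(&text_mutex);
          ret = patch_text_set_nosync(dst, 0, len);
          mutex_unlock(&text_mutex);

          return ret;
  }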

Signed-off-by: Puranjay Mohan <puranjay12@xxxxxxxxx>
Reviewed-by: Pu Lehui <pulehui@xxxxxxxxxx>
Acked-by: Björn Töpel <bjorn@xxxxxxxxxx>
Tested-by: Björn Töpel <bjorn@xxxxxxxxxxxx>
---
arch/riscv/include/asm/patch.h | 1 +
arch/riscv/kernel/patch.c | 77 ++++++++++++++++++++++++++++++++++
2 files changed, 78 insertions(+)

diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index 63c98833d510..e88b52d39eac 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -7,6 +7,7 @@
#define _ASM_RISCV_PATCH_H

int patch_text_nosync(void *addr, const void *insns, size_t len);
+int patch_text_set_nosync(void *addr, u8 c, size_t len);
int patch_text(void *addr, u32 *insns, int ninsns);

extern int riscv_patch_in_stop_machine;
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 2c97e246f4dc..13ee7bf589a1 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -6,6 +6,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
@@ -53,6 +54,39 @@ static void patch_unmap(int fixmap)
}
NOKPROBE_SYMBOL(patch_unmap);

+static int __patch_insn_set(void *addr, u8 c, size_t len)
+{
+        void *waddr = addr;
+        bool across_pages = (((uintptr_t)addr & ~PAGE_MASK) + len) > PAGE_SIZE;
+
+        /*
+         * Only two pages can be mapped at a time for writing.
+         */
+        if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
+                return -EINVAL;
+        /*
+         * The caller is expected to already hold text_mutex, so no
+         * additional locking is needed here; the mutex keeps patching
+         * safe across cores.
+         */
+        lockdep_assert_held(&text_mutex);
+
+        if (across_pages)
+                patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
+
+        waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+        memset(waddr, c, len);
+
+        patch_unmap(FIX_TEXT_POKE0);
+
+        if (across_pages)
+                patch_unmap(FIX_TEXT_POKE1);
+
+        return 0;
+}
+NOKPROBE_SYMBOL(__patch_insn_set);
+
static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
        void *waddr = addr;
@@ -95,6 +129,14 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
}
NOKPROBE_SYMBOL(__patch_insn_write);
#else
+static int __patch_insn_set(void *addr, u8 c, size_t len)
+{
+        memset(addr, c, len);
+
+        return 0;
+}
+NOKPROBE_SYMBOL(__patch_insn_set);
+
static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
        return copy_to_kernel_nofault(addr, insn, len);
@@ -102,6 +144,41 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
NOKPROBE_SYMBOL(__patch_insn_write);
#endif /* CONFIG_MMU */

+static int patch_insn_set(void *addr, u8 c, size_t len)
+{
+        size_t patched = 0;
+        size_t size;
+        int ret = 0;
+
+        /*
+         * __patch_insn_set() can only work on 2 pages at a time, so call it
+         * in a loop with len <= 2 * PAGE_SIZE.
+         */
+        while (patched < len && !ret) {
+                size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
+                ret = __patch_insn_set(addr + patched, c, size);
+
+                patched += size;
+        }
+
+        return ret;
+}
+NOKPROBE_SYMBOL(patch_insn_set);
+
+int patch_text_set_nosync(void *addr, u8 c, size_t len)
+{
+        u32 *tp = addr;
+        int ret;
+
+        ret = patch_insn_set(tp, c, len);
+
+        if (!ret)
+                flush_icache_range((uintptr_t)tp, (uintptr_t)tp + len);
+
+        return ret;
+}
+NOKPROBE_SYMBOL(patch_text_set_nosync);
+
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
        size_t patched = 0;
--
2.39.2