[RFC PATCH v2 2/5] selftests: Add BPF overwritable ring buffer self tests.

From: Francis Laniel
Date: Tue Sep 06 2022 - 16:10:42 EST


Add tests to confirm the behavior of the overwritable BPF ring buffer, in
particular that the oldest data is overwritten by the newest.

Signed-off-by: Francis Laniel <flaniel@xxxxxxxxxxxxxxxxxxx>
---
tools/testing/selftests/bpf/Makefile | 5 +-
.../bpf/prog_tests/ringbuf_overwritable.c | 158 ++++++++++++++++++
.../bpf/progs/test_ringbuf_overwritable.c | 61 +++++++
3 files changed, 222 insertions(+), 2 deletions(-)
create mode 100644 tools/testing/selftests/bpf/prog_tests/ringbuf_overwritable.c
create mode 100644 tools/testing/selftests/bpf/progs/test_ringbuf_overwritable.c

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 8d59ec7f4c2d..96e95dcfc982 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -351,8 +351,9 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
test_usdt.skel.h

LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
- test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
- map_ptr_kern.c core_kern.c core_kern_overflow.c
+ test_ringbuf.c test_ringbuf_overwritable.c atomics.c \
+ trace_printk.c trace_vprintk.c map_ptr_kern.c \
+ core_kern.c core_kern_overflow.c
# Generate both light skeleton and libbpf skeleton for these
LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c
SKEL_BLACKLIST += $$(LSKELS)
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_overwritable.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_overwritable.c
new file mode 100644
index 000000000000..b5ec1e62f761
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_overwritable.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <test_progs.h>
+#include <sys/mman.h>
+#include <sys/epoll.h>
+#include <time.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/sysinfo.h>
+#include <linux/perf_event.h>
+#include <linux/ring_buffer.h>
+#include "test_ringbuf_overwritable.lskel.h"
+
+struct sample {
+ int count;
+ /*
+ * filler size will be computed to have 8 samples in a 4096 bytes long
+ * buffer.
+ */
+ char filler[4096 / 8 - sizeof(int) - 8];
+};
+
+struct ring {
+ ring_buffer_sample_fn sample_cb;
+ __u8 overwritable: 1,
+ __reserved: 7;
+ void *ctx;
+ void *data;
+ unsigned long *consumer_pos;
+ unsigned long *producer_pos;
+ unsigned long mask;
+ int map_fd;
+};
+
+struct ring_buffer {
+ struct epoll_event *events;
+ struct ring *rings;
+ size_t page_size;
+ int epoll_fd;
+ int ring_cnt;
+};
+
+static int duration;
+static struct test_ringbuf_overwritable_lskel *skel;
+
+void test_ringbuf_overwritable(void)
+{
+ const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
+ int page_size = getpagesize();
+ int sample_cnt = 0, sample_read = 0;
+ unsigned long mask = page_size - 1;
+ struct ring_buffer *ringbuf;
+ int err, *len_ptr, len;
+ struct sample *sample;
+ long read_pos;
+ void *data_ptr;
+
+ skel = test_ringbuf_overwritable_lskel__open();
+ if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
+ return;
+
+ skel->maps.ringbuf.max_entries = page_size;
+
+ err = test_ringbuf_overwritable_lskel__load(skel);
+ if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+ goto cleanup;
+
+ /* only trigger BPF program for current process */
+ skel->bss->pid = getpid();
+
+ ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd, NULL, NULL, NULL);
+ if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
+ goto cleanup;
+
+ /* There is only one ring in this ringbuf. */
+ data_ptr = ringbuf->rings[0].data;
+
+ err = test_ringbuf_overwritable_lskel__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
+ goto cleanup;
+
+ /* Trigger one sample. */
+ syscall(__NR_getpgid);
+ sample_cnt++;
+
+ CHECK(skel->bss->avail_data != -EINVAL,
+ "err_avail_size", "exp %d, got %ld\n",
+ -EINVAL, skel->bss->avail_data);
+ CHECK(skel->bss->ring_size != page_size,
+ "err_ring_size", "exp %ld, got %ld\n",
+ (long)page_size, skel->bss->ring_size);
+ CHECK(skel->bss->cons_pos != -EINVAL,
+ "err_cons_pos", "exp %d, got %ld\n",
+ -EINVAL, skel->bss->cons_pos);
+ CHECK(skel->bss->prod_pos != -(long)(sample_cnt * rec_sz),
+ "err_prod_pos", "exp %ld, got %ld\n",
+ -(long)(sample_cnt * rec_sz), skel->bss->prod_pos);
+
+ len_ptr = data_ptr + (skel->bss->prod_pos & mask);
+ len = smp_load_acquire(len_ptr);
+
+ CHECK(len != sizeof(struct sample),
+ "err_sample_len", "exp %ld, got %d\n",
+ (long)sizeof(struct sample), len);
+
+ sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
+
+ CHECK(sample->count != sample_cnt,
+ "err_sample_cnt", "exp %d, got %d\n",
+ sample_cnt, sample->count);
+
+ /* Trigger many samples, so we overwrite data */
+ for (int i = 0; i < 16; i++) {
+ syscall(__NR_getpgid);
+ sample_cnt++;
+ }
+
+ CHECK(skel->bss->avail_data != -EINVAL,
+ "err_avail_size", "exp %d, got %ld\n",
+ -EINVAL, skel->bss->avail_data);
+ CHECK(skel->bss->ring_size != page_size,
+ "err_ring_size", "exp %ld, got %ld\n",
+ (long)page_size, skel->bss->ring_size);
+ CHECK(skel->bss->cons_pos != -EINVAL,
+ "err_cons_pos", "exp %d, got %ld\n",
+ -EINVAL, skel->bss->cons_pos);
+ CHECK(skel->bss->prod_pos != -(long)(sample_cnt * rec_sz),
+ "err_prod_pos", "exp %ld, got %ld\n",
+ -(long)(sample_cnt * rec_sz), skel->bss->prod_pos);
+
+ read_pos = skel->bss->prod_pos;
+ sample_read = 0;
+ while (read_pos - skel->bss->prod_pos < mask) {
+ len_ptr = data_ptr + (read_pos & mask);
+ len = smp_load_acquire(len_ptr);
+
+ sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
+
+ CHECK(sample->count != sample_cnt - sample_read,
+ "err_sample_cnt", "exp %d, got %d\n",
+ sample_cnt - sample_read, sample->count);
+
+ sample_read++;
+ read_pos += round_up(len + BPF_RINGBUF_HDR_SZ, 8);
+ }
+
+ CHECK(sample_read != page_size / rec_sz,
+ "err_sample_read", "exp %ld, got %d\n",
+ (long)(page_size / rec_sz), sample_read);
+
+ test_ringbuf_overwritable_lskel__detach(skel);
+cleanup:
+ ring_buffer__free(ringbuf);
+ test_ringbuf_overwritable_lskel__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_overwritable.c b/tools/testing/selftests/bpf/progs/test_ringbuf_overwritable.c
new file mode 100644
index 000000000000..e28be35059b7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_overwritable.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct sample {
+ int count;
+ /*
+ * filler size will be computed to have 8 samples in a 4096 bytes long
+ * buffer.
+ */
+ char filler[4096 / 8 - sizeof(int) - BPF_RINGBUF_HDR_SZ];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(map_flags, BPF_F_RB_OVERWRITABLE);
+} ringbuf SEC(".maps");
+
+/* inputs */
+int pid = 0;
+
+/* outputs */
+long avail_data = 0;
+long ring_size = 0;
+long cons_pos = 0;
+long prod_pos = 0;
+
+static int count;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int test_ringbuf_overwritable(void *ctx)
+{
+ int cur_pid = bpf_get_current_pid_tgid() >> 32;
+ struct sample *sample;
+
+ if (cur_pid != pid)
+ return 0;
+
+ sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
+ if (!sample)
+ return 0;
+
+ __sync_fetch_and_add(&count, 1);
+ sample->count = count;
+
+ bpf_printk("count: %d\n", count);
+
+ bpf_ringbuf_submit(sample, 0);
+
+ avail_data = bpf_ringbuf_query(&ringbuf, BPF_RB_AVAIL_DATA);
+ ring_size = bpf_ringbuf_query(&ringbuf, BPF_RB_RING_SIZE);
+ cons_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_CONS_POS);
+ prod_pos = bpf_ringbuf_query(&ringbuf, BPF_RB_PROD_POS);
+
+ return 0;
+}
--
2.25.1