selftests/bpf: Add tests for arena spin lock

Add some basic selftests for qspinlock built over BPF arena using the
cond_break_label macro.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250306035431.2186189-4-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
committed by
Alexei Starovoitov
parent
88d706ba7c
commit
2dfc8186d6
@@ -0,0 +1,108 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
|
||||
#include <test_progs.h>
|
||||
#include <network_helpers.h>
|
||||
#include <sys/sysinfo.h>
|
||||
|
||||
/*
 * Userspace mirror of the lock/qnode layout used by the BPF-side arena
 * spin lock implementation.  Declared here only so the skeleton header
 * included below compiles; the test never dereferences these fields.
 * NOTE(review): must stay in sync with the BPF-side definitions in
 * bpf_arena_spin_lock.h — confirm when that header changes.
 */
struct qspinlock { int val; };
typedef struct qspinlock arena_spinlock_t;

struct arena_qnode {
	unsigned long next;
	int count;
	int locked;
};
|
||||
|
||||
#include "arena_spin_lock.skel.h"

/* Next CPU index to pin a worker thread to; bumped atomically per thread. */
static long cpu;
/* bpf_prog_test_run repeat count per thread; set by test_arena_spin_lock(). */
static int repeat;

/* Releases all worker threads at once so they contend on the lock together. */
pthread_barrier_t barrier;
|
||||
|
||||
static void *spin_lock_thread(void *arg)
|
||||
{
|
||||
int err, prog_fd = *(u32 *)arg;
|
||||
LIBBPF_OPTS(bpf_test_run_opts, topts,
|
||||
.data_in = &pkt_v4,
|
||||
.data_size_in = sizeof(pkt_v4),
|
||||
.repeat = repeat,
|
||||
);
|
||||
cpu_set_t cpuset;
|
||||
|
||||
CPU_ZERO(&cpuset);
|
||||
CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
|
||||
ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity");
|
||||
|
||||
err = pthread_barrier_wait(&barrier);
|
||||
if (err != PTHREAD_BARRIER_SERIAL_THREAD && err != 0)
|
||||
ASSERT_FALSE(true, "pthread_barrier");
|
||||
|
||||
err = bpf_prog_test_run_opts(prog_fd, &topts);
|
||||
ASSERT_OK(err, "test_run err");
|
||||
ASSERT_EQ((int)topts.retval, 0, "test_run retval");
|
||||
|
||||
pthread_exit(arg);
|
||||
}
|
||||
|
||||
static void test_arena_spin_lock_size(int size)
|
||||
{
|
||||
LIBBPF_OPTS(bpf_test_run_opts, topts);
|
||||
struct arena_spin_lock *skel;
|
||||
pthread_t thread_id[16];
|
||||
int prog_fd, i, err;
|
||||
void *ret;
|
||||
|
||||
if (get_nprocs() < 2) {
|
||||
test__skip();
|
||||
return;
|
||||
}
|
||||
|
||||
skel = arena_spin_lock__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load"))
|
||||
return;
|
||||
if (skel->data->test_skip == 2) {
|
||||
test__skip();
|
||||
goto end;
|
||||
}
|
||||
skel->bss->cs_count = size;
|
||||
skel->bss->limit = repeat * 16;
|
||||
|
||||
ASSERT_OK(pthread_barrier_init(&barrier, NULL, 16), "barrier init");
|
||||
|
||||
prog_fd = bpf_program__fd(skel->progs.prog);
|
||||
for (i = 0; i < 16; i++) {
|
||||
err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
|
||||
if (!ASSERT_OK(err, "pthread_create"))
|
||||
goto end_barrier;
|
||||
}
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
|
||||
goto end_barrier;
|
||||
if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
|
||||
goto end_barrier;
|
||||
}
|
||||
|
||||
ASSERT_EQ(skel->bss->counter, repeat * 16, "check counter value");
|
||||
|
||||
end_barrier:
|
||||
pthread_barrier_destroy(&barrier);
|
||||
end:
|
||||
arena_spin_lock__destroy(skel);
|
||||
return;
|
||||
}
|
||||
|
||||
void test_arena_spin_lock(void)
|
||||
{
|
||||
repeat = 1000;
|
||||
if (test__start_subtest("arena_spin_lock_1"))
|
||||
test_arena_spin_lock_size(1);
|
||||
cpu = 0;
|
||||
if (test__start_subtest("arena_spin_lock_1000"))
|
||||
test_arena_spin_lock_size(1000);
|
||||
cpu = 0;
|
||||
repeat = 100;
|
||||
if (test__start_subtest("arena_spin_lock_50000"))
|
||||
test_arena_spin_lock_size(50000);
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
|
||||
#include <vmlinux.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include "bpf_misc.h"
|
||||
#include "bpf_arena_spin_lock.h"
|
||||
|
||||
/* BPF arena backing the lock; mmapable so userspace shares the pages.
 * map_extra fixes the start of the mmap() region; arm64 uses a lower
 * address (1<<32) than other arches (1<<44) — presumably to fit the
 * arch's user VA layout, confirm against arena map docs.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 100); /* number of pages */
#ifdef __TARGET_ARCH_arm64
	__ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
#else
	__ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
#endif
} arena SEC(".maps");
|
||||
|
||||
/* Critical-section length (bpf_repeat iterations); set by userspace. */
int cs_count;

#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
arena_spinlock_t __arena lock;
/* 1: required features present, test runs. */
int test_skip = 1;
#else
/* 2: atomics/addr-space-cast support missing; userspace skips the test. */
int test_skip = 2;
#endif

/* Shared counter incremented under the lock, capped at @limit; both are
 * read/written by userspace through the skeleton's bss.
 */
int counter;
int limit;
|
||||
|
||||
SEC("tc")
int prog(void *ctx)
{
	/* -2 is returned when the feature gate below compiles the body out. */
	int ret = -2;

#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	unsigned long flags;

	/* Lock acquisition can fail (the arena qspinlock is best-effort);
	 * propagate the error so userspace sees a non-zero retval.
	 */
	if ((ret = arena_spin_lock_irqsave(&lock, flags)))
		return ret;
	if (counter != limit)
		counter++;
	/* Stretch the critical section to tune contention. */
	bpf_repeat(cs_count);
	ret = 0;
	arena_spin_unlock_irqrestore(&lock, flags);
#endif
	return ret;
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
Reference in New Issue
Block a user