Merge tag 'bpf-next-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Pull bpf updates from Alexei Starovoitov:
- Fix and improve BTF deduplication of identical BTF types (Alan
Maguire and Andrii Nakryiko)
- Support up to 12 arguments in BPF trampoline on arm64 (Xu Kuohai and
Alexis Lothoré)
- Support load-acquire and store-release instructions in BPF JIT on
riscv64 (Andrea Parri)
- Fix uninitialized values in BPF_{CORE,PROBE}_READ macros (Anton
Protopopov)
- Streamline allowed helpers across program types (Feng Yang)
- Support atomic update for hashtab of BPF maps (Hou Tao)
- Implement json output for BPF helpers (Ihor Solodrai)
- Several s390 JIT fixes (Ilya Leoshkevich)
- Various sockmap fixes (Jiayuan Chen)
- Support mmap of vmlinux BTF data (Lorenz Bauer)
- Support BPF rbtree traversal and list peeking (Martin KaFai Lau)
- Tests for sockmap/sockhash redirection (Michal Luczaj)
- Introduce kfuncs for memory reads into dynptrs (Mykyta Yatsenko)
- Add support for dma-buf iterators in BPF (T.J. Mercier)
- The verifier support for __bpf_trap() (Yonghong Song)
* tag 'bpf-next-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (135 commits)
bpf, arm64: Remove unused-but-set function and variable.
selftests/bpf: Add tests with stack ptr register in conditional jmp
bpf: Do not include stack ptr register in precision backtracking bookkeeping
selftests/bpf: enable many-args tests for arm64
bpf, arm64: Support up to 12 function arguments
bpf: Check rcu_read_lock_trace_held() in bpf_map_lookup_percpu_elem()
bpf: Avoid __bpf_prog_ret0_warn when jit fails
bpftool: Add support for custom BTF path in prog load/loadall
selftests/bpf: Add unit tests with __bpf_trap() kfunc
bpf: Warn with __bpf_trap() kfunc maybe due to uninitialized variable
bpf: Remove special_kfunc_set from verifier
selftests/bpf: Add test for open coded dmabuf_iter
selftests/bpf: Add test for dmabuf_iter
bpf: Add open coded dmabuf iterator
bpf: Add dmabuf iterator
dma-buf: Rename debugfs symbols
bpf: Fix error return value in bpf_copy_from_user_dynptr
libbpf: Use mmap to parse vmlinux BTF from sysfs
selftests: bpf: Add a test for mmapable vmlinux BTF
btf: Allow mmap of vmlinux btf
...
This commit is contained in:
+211
-110
@@ -572,7 +572,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
|
||||
return value;
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_perf_event_read_proto = {
|
||||
const struct bpf_func_proto bpf_perf_event_read_proto = {
|
||||
.func = bpf_perf_event_read,
|
||||
.gpl_only = true,
|
||||
.ret_type = RET_INTEGER,
|
||||
@@ -882,7 +882,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
|
||||
return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_send_signal_proto = {
|
||||
const struct bpf_func_proto bpf_send_signal_proto = {
|
||||
.func = bpf_send_signal,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
@@ -894,7 +894,7 @@ BPF_CALL_1(bpf_send_signal_thread, u32, sig)
|
||||
return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_send_signal_thread_proto = {
|
||||
const struct bpf_func_proto bpf_send_signal_thread_proto = {
|
||||
.func = bpf_send_signal_thread,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
@@ -1185,7 +1185,7 @@ BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
|
||||
return entry_cnt * br_entry_size;
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
|
||||
const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
|
||||
.func = bpf_get_branch_snapshot,
|
||||
.gpl_only = true,
|
||||
.ret_type = RET_INTEGER,
|
||||
@@ -1430,56 +1430,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
const struct bpf_func_proto *func_proto;
|
||||
|
||||
switch (func_id) {
|
||||
case BPF_FUNC_map_lookup_elem:
|
||||
return &bpf_map_lookup_elem_proto;
|
||||
case BPF_FUNC_map_update_elem:
|
||||
return &bpf_map_update_elem_proto;
|
||||
case BPF_FUNC_map_delete_elem:
|
||||
return &bpf_map_delete_elem_proto;
|
||||
case BPF_FUNC_map_push_elem:
|
||||
return &bpf_map_push_elem_proto;
|
||||
case BPF_FUNC_map_pop_elem:
|
||||
return &bpf_map_pop_elem_proto;
|
||||
case BPF_FUNC_map_peek_elem:
|
||||
return &bpf_map_peek_elem_proto;
|
||||
case BPF_FUNC_map_lookup_percpu_elem:
|
||||
return &bpf_map_lookup_percpu_elem_proto;
|
||||
case BPF_FUNC_ktime_get_ns:
|
||||
return &bpf_ktime_get_ns_proto;
|
||||
case BPF_FUNC_ktime_get_boot_ns:
|
||||
return &bpf_ktime_get_boot_ns_proto;
|
||||
case BPF_FUNC_tail_call:
|
||||
return &bpf_tail_call_proto;
|
||||
case BPF_FUNC_get_current_task:
|
||||
return &bpf_get_current_task_proto;
|
||||
case BPF_FUNC_get_current_task_btf:
|
||||
return &bpf_get_current_task_btf_proto;
|
||||
case BPF_FUNC_task_pt_regs:
|
||||
return &bpf_task_pt_regs_proto;
|
||||
case BPF_FUNC_get_current_uid_gid:
|
||||
return &bpf_get_current_uid_gid_proto;
|
||||
case BPF_FUNC_get_current_comm:
|
||||
return &bpf_get_current_comm_proto;
|
||||
case BPF_FUNC_trace_printk:
|
||||
return bpf_get_trace_printk_proto();
|
||||
case BPF_FUNC_get_smp_processor_id:
|
||||
return &bpf_get_smp_processor_id_proto;
|
||||
case BPF_FUNC_get_numa_node_id:
|
||||
return &bpf_get_numa_node_id_proto;
|
||||
case BPF_FUNC_perf_event_read:
|
||||
return &bpf_perf_event_read_proto;
|
||||
case BPF_FUNC_get_prandom_u32:
|
||||
return &bpf_get_prandom_u32_proto;
|
||||
case BPF_FUNC_probe_read_user:
|
||||
return &bpf_probe_read_user_proto;
|
||||
case BPF_FUNC_probe_read_kernel:
|
||||
return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
|
||||
NULL : &bpf_probe_read_kernel_proto;
|
||||
case BPF_FUNC_probe_read_user_str:
|
||||
return &bpf_probe_read_user_str_proto;
|
||||
case BPF_FUNC_probe_read_kernel_str:
|
||||
return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
|
||||
NULL : &bpf_probe_read_kernel_str_proto;
|
||||
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
|
||||
case BPF_FUNC_probe_read:
|
||||
return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
|
||||
@@ -1488,65 +1440,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
|
||||
NULL : &bpf_probe_read_compat_str_proto;
|
||||
#endif
|
||||
#ifdef CONFIG_CGROUPS
|
||||
case BPF_FUNC_cgrp_storage_get:
|
||||
return &bpf_cgrp_storage_get_proto;
|
||||
case BPF_FUNC_cgrp_storage_delete:
|
||||
return &bpf_cgrp_storage_delete_proto;
|
||||
case BPF_FUNC_current_task_under_cgroup:
|
||||
return &bpf_current_task_under_cgroup_proto;
|
||||
#endif
|
||||
case BPF_FUNC_send_signal:
|
||||
return &bpf_send_signal_proto;
|
||||
case BPF_FUNC_send_signal_thread:
|
||||
return &bpf_send_signal_thread_proto;
|
||||
case BPF_FUNC_perf_event_read_value:
|
||||
return &bpf_perf_event_read_value_proto;
|
||||
case BPF_FUNC_ringbuf_output:
|
||||
return &bpf_ringbuf_output_proto;
|
||||
case BPF_FUNC_ringbuf_reserve:
|
||||
return &bpf_ringbuf_reserve_proto;
|
||||
case BPF_FUNC_ringbuf_submit:
|
||||
return &bpf_ringbuf_submit_proto;
|
||||
case BPF_FUNC_ringbuf_discard:
|
||||
return &bpf_ringbuf_discard_proto;
|
||||
case BPF_FUNC_ringbuf_query:
|
||||
return &bpf_ringbuf_query_proto;
|
||||
case BPF_FUNC_jiffies64:
|
||||
return &bpf_jiffies64_proto;
|
||||
case BPF_FUNC_get_task_stack:
|
||||
return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
|
||||
: &bpf_get_task_stack_proto;
|
||||
case BPF_FUNC_copy_from_user:
|
||||
return &bpf_copy_from_user_proto;
|
||||
case BPF_FUNC_copy_from_user_task:
|
||||
return &bpf_copy_from_user_task_proto;
|
||||
case BPF_FUNC_snprintf_btf:
|
||||
return &bpf_snprintf_btf_proto;
|
||||
case BPF_FUNC_per_cpu_ptr:
|
||||
return &bpf_per_cpu_ptr_proto;
|
||||
case BPF_FUNC_this_cpu_ptr:
|
||||
return &bpf_this_cpu_ptr_proto;
|
||||
case BPF_FUNC_task_storage_get:
|
||||
if (bpf_prog_check_recur(prog))
|
||||
return &bpf_task_storage_get_recur_proto;
|
||||
return &bpf_task_storage_get_proto;
|
||||
case BPF_FUNC_task_storage_delete:
|
||||
if (bpf_prog_check_recur(prog))
|
||||
return &bpf_task_storage_delete_recur_proto;
|
||||
return &bpf_task_storage_delete_proto;
|
||||
case BPF_FUNC_for_each_map_elem:
|
||||
return &bpf_for_each_map_elem_proto;
|
||||
case BPF_FUNC_snprintf:
|
||||
return &bpf_snprintf_proto;
|
||||
case BPF_FUNC_get_func_ip:
|
||||
return &bpf_get_func_ip_proto_tracing;
|
||||
case BPF_FUNC_get_branch_snapshot:
|
||||
return &bpf_get_branch_snapshot_proto;
|
||||
case BPF_FUNC_find_vma:
|
||||
return &bpf_find_vma_proto;
|
||||
case BPF_FUNC_trace_vprintk:
|
||||
return bpf_get_trace_vprintk_proto();
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -1858,7 +1753,7 @@ static struct pt_regs *get_bpf_raw_tp_regs(void)
|
||||
struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
|
||||
int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
|
||||
|
||||
if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
|
||||
if (nest_level > ARRAY_SIZE(tp_regs->regs)) {
|
||||
this_cpu_dec(bpf_raw_tp_nest_level);
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
@@ -2987,6 +2882,9 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
|
||||
if (sizeof(u64) != sizeof(void *))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (attr->link_create.flags)
|
||||
return -EINVAL;
|
||||
|
||||
if (!is_kprobe_multi(prog))
|
||||
return -EINVAL;
|
||||
|
||||
@@ -3376,6 +3274,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
|
||||
if (sizeof(u64) != sizeof(void *))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (attr->link_create.flags)
|
||||
return -EINVAL;
|
||||
|
||||
if (!is_uprobe_multi(prog))
|
||||
return -EINVAL;
|
||||
|
||||
@@ -3417,7 +3318,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
|
||||
}
|
||||
|
||||
if (pid) {
|
||||
rcu_read_lock();
|
||||
task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
|
||||
rcu_read_unlock();
|
||||
if (!task) {
|
||||
err = -ESRCH;
|
||||
goto error_path_put;
|
||||
@@ -3565,6 +3468,146 @@ static int __init bpf_kprobe_multi_kfuncs_init(void)
|
||||
|
||||
late_initcall(bpf_kprobe_multi_kfuncs_init);
|
||||
|
||||
/* Common callback signature shared by all chunk-copy helpers below.
 * Returns 0 (or a byte count for the *_str variants) on success, negative
 * error on failure. @tsk selects the task to read from; NULL means the
 * current task (see copy_user_data_sleepable()).
 */
typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk);

/*
 * The __always_inline is to make sure the compiler doesn't
 * generate indirect calls into callbacks, which is expensive,
 * on some kernel configurations. This allows compiler to put
 * direct calls into all the specific callback implementations
 * (copy_user_data_sleepable, copy_user_data_nofault, and so on)
 */
/*
 * Copy a NUL-terminated string into dynptr @dptr at offset @doff, at most
 * @size bytes including the terminating NUL.
 *
 * Fast path: if the destination range is linear, hand the whole copy to
 * @str_copy_fn in one call. Otherwise bounce through a 256-byte stack
 * buffer, chunk by chunk, with each chunk's trailing NUL overwritten by the
 * first byte of the next chunk.
 *
 * Returns the number of bytes written (including the NUL when the string
 * terminated within @size) or a negative error.
 */
static __always_inline int __bpf_dynptr_copy_str(struct bpf_dynptr *dptr, u32 doff, u32 size,
						 const void *unsafe_src,
						 copy_fn_t str_copy_fn,
						 struct task_struct *tsk)
{
	struct bpf_dynptr_kern *dst;
	u32 chunk_sz, off;
	void *dst_slice;
	int cnt, err;
	char buf[256];	/* bounce buffer for non-linear destinations */

	/* Linear destination: a single callback invocation suffices. */
	dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size);
	if (likely(dst_slice))
		return str_copy_fn(dst_slice, unsafe_src, size, tsk);

	dst = (struct bpf_dynptr_kern *)dptr;
	if (bpf_dynptr_check_off_len(dst, doff, size))
		return -E2BIG;

	for (off = 0; off < size; off += chunk_sz - 1) {
		chunk_sz = min_t(u32, sizeof(buf), size - off);
		/* Expect str_copy_fn to return count of copied bytes, including
		 * zero terminator. Next iteration increment off by chunk_sz - 1 to
		 * overwrite NUL.
		 */
		cnt = str_copy_fn(buf, unsafe_src + off, chunk_sz, tsk);
		if (cnt < 0)
			return cnt;
		err = __bpf_dynptr_write(dst, doff + off, buf, cnt, 0);
		if (err)
			return err;
		if (cnt < chunk_sz || chunk_sz == 1) /* we are done */
			return off + cnt;
	}
	return off;
}
|
||||
|
||||
static __always_inline int __bpf_dynptr_copy(const struct bpf_dynptr *dptr, u32 doff,
|
||||
u32 size, const void *unsafe_src,
|
||||
copy_fn_t copy_fn, struct task_struct *tsk)
|
||||
{
|
||||
struct bpf_dynptr_kern *dst;
|
||||
void *dst_slice;
|
||||
char buf[256];
|
||||
u32 off, chunk_sz;
|
||||
int err;
|
||||
|
||||
dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size);
|
||||
if (likely(dst_slice))
|
||||
return copy_fn(dst_slice, unsafe_src, size, tsk);
|
||||
|
||||
dst = (struct bpf_dynptr_kern *)dptr;
|
||||
if (bpf_dynptr_check_off_len(dst, doff, size))
|
||||
return -E2BIG;
|
||||
|
||||
for (off = 0; off < size; off += chunk_sz) {
|
||||
chunk_sz = min_t(u32, sizeof(buf), size - off);
|
||||
err = copy_fn(buf, unsafe_src + off, chunk_sz, tsk);
|
||||
if (err)
|
||||
return err;
|
||||
err = __bpf_dynptr_write(dst, doff + off, buf, chunk_sz, 0);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __always_inline int copy_user_data_nofault(void *dst, const void *unsafe_src,
|
||||
u32 size, struct task_struct *tsk)
|
||||
{
|
||||
return copy_from_user_nofault(dst, (const void __user *)unsafe_src, size);
|
||||
}
|
||||
|
||||
static __always_inline int copy_user_data_sleepable(void *dst, const void *unsafe_src,
|
||||
u32 size, struct task_struct *tsk)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!tsk) { /* Read from the current task */
|
||||
ret = copy_from_user(dst, (const void __user *)unsafe_src, size);
|
||||
if (ret)
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = access_process_vm(tsk, (unsigned long)unsafe_src, dst, size, 0);
|
||||
if (ret != size)
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Data-copy callback: non-faulting read from an unchecked kernel address.
 * @tsk is unused — kernel reads always target the current address space.
 */
static __always_inline int copy_kernel_data_nofault(void *dst, const void *unsafe_src,
						    u32 size, struct task_struct *tsk)
{
	return copy_from_kernel_nofault(dst, unsafe_src, size);
}
|
||||
|
||||
/* String-copy callback: bounded, non-faulting read of a user string from
 * the current task. @tsk is unused. Returns strncpy_from_user_nofault()'s
 * result unchanged.
 */
static __always_inline int copy_user_str_nofault(void *dst, const void *unsafe_src,
						 u32 size, struct task_struct *tsk)
{
	return strncpy_from_user_nofault(dst, (const void __user *)unsafe_src, size);
}
|
||||
|
||||
static __always_inline int copy_user_str_sleepable(void *dst, const void *unsafe_src,
|
||||
u32 size, struct task_struct *tsk)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (unlikely(size == 0))
|
||||
return 0;
|
||||
|
||||
if (tsk) {
|
||||
ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_src, dst, size, 0);
|
||||
} else {
|
||||
ret = strncpy_from_user(dst, (const void __user *)unsafe_src, size - 1);
|
||||
/* strncpy_from_user does not guarantee NUL termination */
|
||||
if (ret >= 0)
|
||||
((char *)dst)[ret] = '\0';
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return ret + 1;
|
||||
}
|
||||
|
||||
/* String-copy callback: bounded, non-faulting read of a kernel string.
 * @tsk is unused. Returns strncpy_from_kernel_nofault()'s result unchanged.
 */
static __always_inline int copy_kernel_str_nofault(void *dst, const void *unsafe_src,
						   u32 size, struct task_struct *tsk)
{
	return strncpy_from_kernel_nofault(dst, unsafe_src, size);
}
|
||||
|
||||
__bpf_kfunc_start_defs();
|
||||
|
||||
__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
|
||||
@@ -3576,4 +3619,62 @@ __bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid
|
||||
return bpf_send_signal_common(sig, type, task, value);
|
||||
}
|
||||
|
||||
/* kfunc: non-faulting read of @size bytes of user memory into @dptr at
 * offset @off. NULL task means the read targets the current task.
 */
__bpf_kfunc int bpf_probe_read_user_dynptr(struct bpf_dynptr *dptr, u32 off,
					   u32 size, const void __user *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign,
				 copy_user_data_nofault, NULL);
}
|
||||
|
||||
/* kfunc: non-faulting read of @size bytes of kernel memory into @dptr at
 * offset @off.
 */
__bpf_kfunc int bpf_probe_read_kernel_dynptr(struct bpf_dynptr *dptr, u32 off,
					     u32 size, const void *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy(dptr, off, size, unsafe_ptr__ign,
				 copy_kernel_data_nofault, NULL);
}
|
||||
|
||||
/* kfunc: non-faulting copy of a NUL-terminated user string into @dptr at
 * offset @off, at most @size bytes including the NUL.
 */
__bpf_kfunc int bpf_probe_read_user_str_dynptr(struct bpf_dynptr *dptr, u32 off,
					       u32 size, const void __user *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign,
				     copy_user_str_nofault, NULL);
}
|
||||
|
||||
/* kfunc: non-faulting copy of a NUL-terminated kernel string into @dptr at
 * offset @off, at most @size bytes including the NUL.
 */
__bpf_kfunc int bpf_probe_read_kernel_str_dynptr(struct bpf_dynptr *dptr, u32 off,
						 u32 size, const void *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy_str(dptr, off, size, unsafe_ptr__ign,
				     copy_kernel_str_nofault, NULL);
}
|
||||
|
||||
/* kfunc (sleepable): copy @size bytes of user memory into @dptr at offset
 * @off using the faultable copy path (plain copy_from_user); NULL task
 * means the current task.
 */
__bpf_kfunc int bpf_copy_from_user_dynptr(struct bpf_dynptr *dptr, u32 off,
					  u32 size, const void __user *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign,
				 copy_user_data_sleepable, NULL);
}
|
||||
|
||||
/* kfunc (sleepable): copy a NUL-terminated user string into @dptr at offset
 * @off, at most @size bytes including the NUL; current task only.
 */
__bpf_kfunc int bpf_copy_from_user_str_dynptr(struct bpf_dynptr *dptr, u32 off,
					      u32 size, const void __user *unsafe_ptr__ign)
{
	return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign,
				     copy_user_str_sleepable, NULL);
}
|
||||
|
||||
/* kfunc (sleepable): copy @size bytes from @tsk's user address space into
 * @dptr at offset @off (remote read via access_process_vm in the sleepable
 * copy callback).
 */
__bpf_kfunc int bpf_copy_from_user_task_dynptr(struct bpf_dynptr *dptr, u32 off,
					       u32 size, const void __user *unsafe_ptr__ign,
					       struct task_struct *tsk)
{
	return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign,
				 copy_user_data_sleepable, tsk);
}
|
||||
|
||||
/* kfunc (sleepable): copy a NUL-terminated string from @tsk's user address
 * space into @dptr at offset @off, at most @size bytes including the NUL
 * (remote read via copy_remote_vm_str in the sleepable string callback).
 */
__bpf_kfunc int bpf_copy_from_user_task_str_dynptr(struct bpf_dynptr *dptr, u32 off,
						   u32 size, const void __user *unsafe_ptr__ign,
						   struct task_struct *tsk)
{
	return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign,
				     copy_user_str_sleepable, tsk);
}
|
||||
|
||||
__bpf_kfunc_end_defs();
|
||||
|
||||
@@ -1489,7 +1489,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
|
||||
: BPF_FD_TYPE_UPROBE;
|
||||
*filename = tu->filename;
|
||||
*probe_offset = tu->offset;
|
||||
*probe_addr = 0;
|
||||
*probe_addr = tu->ref_ctr_offset;
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_PERF_EVENTS */
|
||||
|
||||
Reference in New Issue
Block a user