Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says: ==================== pull-request: bpf-next 2024-09-11 We've added 12 non-merge commits during the last 16 day(s) which contain a total of 20 files changed, 228 insertions(+), 30 deletions(-). There's a minor merge conflict in drivers/net/netkit.c between commits 00d066a4d4 ("netdev_features: convert NETIF_F_LLTX to dev->lltx") and d966087948 ("netkit: Disable netpoll support"). The main changes are: 1) Enable bpf_dynptr_from_skb for tp_btf such that this can be used to easily parse skbs in BPF programs attached to tracepoints, from Philo Lu. 2) Add a cond_resched() point in BPF's sock_hash_free() as there have been several syzbot soft lockup reports recently, from Eric Dumazet. 3) Fix xsk_buff_can_alloc() to account for queue_empty_descs which got noticed when zero copy ice driver started to use it, from Maciej Fijalkowski. 4) Move the xdp:xdp_cpumap_kthread tracepoint before cpumap pushes skbs up via netif_receive_skb_list() to better measure latencies, from Daniel Xu. 5) Follow-up to disable netpoll support from netkit, from Daniel Borkmann. 6) Improve xsk selftests to not assume a fixed MAX_SKB_FRAGS of 17 but instead gather the actual value via /proc/sys/net/core/max_skb_frags, also from Maciej Fijalkowski. 
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: sock_map: Add a cond_resched() in sock_hash_free() selftests/bpf: Expand skb dynptr selftests for tp_btf bpf: Allow bpf_dynptr_from_skb() for tp_btf tcp: Use skb__nullable in trace_tcp_send_reset selftests/bpf: Add test for __nullable suffix in tp_btf bpf: Support __nullable argument suffix for tp_btf bpf, cpumap: Move xdp:xdp_cpumap_kthread tracepoint before rcv selftests/xsk: Read current MAX_SKB_FRAGS from sysctl knob xsk: Bump xsk_queue::queue_empty_descs in xp_can_alloc() tcp_bpf: Remove an unused parameter for bpf_tcp_ingress() bpf, sockmap: Correct spelling skmsg.c netkit: Disable netpoll support Signed-off-by: Jakub Kicinski <kuba@kernel.org> ==================== Link: https://patch.msgid.link/20240911211525.13834-1-daniel@iogearbox.net Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
@@ -6525,6 +6525,9 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
|
||||
if (prog_args_trusted(prog))
|
||||
info->reg_type |= PTR_TRUSTED;
|
||||
|
||||
if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
|
||||
info->reg_type |= PTR_MAYBE_NULL;
|
||||
|
||||
if (tgt_prog) {
|
||||
enum bpf_prog_type tgt_type;
|
||||
|
||||
|
||||
+4
-2
@@ -354,12 +354,14 @@ static int cpu_map_kthread_run(void *data)
|
||||
|
||||
list_add_tail(&skb->list, &list);
|
||||
}
|
||||
netif_receive_skb_list(&list);
|
||||
|
||||
/* Feedback loop via tracepoint */
|
||||
/* Feedback loop via tracepoint.
|
||||
* NB: keep before recv to allow measuring enqueue/dequeue latency.
|
||||
*/
|
||||
trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
|
||||
sched, &stats);
|
||||
|
||||
netif_receive_skb_list(&list);
|
||||
local_bh_enable(); /* resched point, may call do_softirq() */
|
||||
}
|
||||
__set_current_state(TASK_RUNNING);
|
||||
|
||||
+32
-4
@@ -28,6 +28,8 @@
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/bpf_mem_alloc.h>
|
||||
#include <net/xdp.h>
|
||||
#include <linux/trace_events.h>
|
||||
#include <linux/kallsyms.h>
|
||||
|
||||
#include "disasm.h"
|
||||
|
||||
@@ -21154,11 +21156,13 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
|
||||
{
|
||||
bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
|
||||
bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING;
|
||||
char trace_symbol[KSYM_SYMBOL_LEN];
|
||||
const char prefix[] = "btf_trace_";
|
||||
struct bpf_raw_event_map *btp;
|
||||
int ret = 0, subprog = -1, i;
|
||||
const struct btf_type *t;
|
||||
bool conservative = true;
|
||||
const char *tname;
|
||||
const char *tname, *fname;
|
||||
struct btf *btf;
|
||||
long addr = 0;
|
||||
struct module *mod = NULL;
|
||||
@@ -21289,10 +21293,34 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
|
||||
return -EINVAL;
|
||||
}
|
||||
tname += sizeof(prefix) - 1;
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
if (!btf_type_is_ptr(t))
|
||||
/* should never happen in valid vmlinux build */
|
||||
|
||||
/* The func_proto of "btf_trace_##tname" is generated from typedef without argument
|
||||
* names. Thus using bpf_raw_event_map to get argument names.
|
||||
*/
|
||||
btp = bpf_get_raw_tracepoint(tname);
|
||||
if (!btp)
|
||||
return -EINVAL;
|
||||
fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL,
|
||||
trace_symbol);
|
||||
bpf_put_raw_tracepoint(btp);
|
||||
|
||||
if (fname)
|
||||
ret = btf_find_by_name_kind(btf, fname, BTF_KIND_FUNC);
|
||||
|
||||
if (!fname || ret < 0) {
|
||||
bpf_log(log, "Cannot find btf of tracepoint template, fall back to %s%s.\n",
|
||||
prefix, tname);
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
if (!btf_type_is_ptr(t))
|
||||
/* should never happen in valid vmlinux build */
|
||||
return -EINVAL;
|
||||
} else {
|
||||
t = btf_type_by_id(btf, ret);
|
||||
if (!btf_type_is_func(t))
|
||||
/* should never happen in valid vmlinux build */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
if (!btf_type_is_func_proto(t))
|
||||
/* should never happen in valid vmlinux build */
|
||||
|
||||
Reference in New Issue
Block a user