Martin KaFai Lau says:

====================
pull-request: bpf-next 2025-07-17

We've added 13 non-merge commits during the last 20 day(s) which contain
a total of 4 files changed, 712 insertions(+), 84 deletions(-).

The main changes are:

1) Avoid skipping or repeating a sk when using a TCP bpf_iter,
   from Jordan Rife.

2) Clarify the driver requirement on using the XDP metadata,
   from Song Yoong Siang.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next:
  doc: xdp: Clarify driver implementation for XDP Rx metadata
  selftests/bpf: Add tests for bucket resume logic in established sockets
  selftests/bpf: Create iter_tcp_destroy test program
  selftests/bpf: Create established sockets in socket iterator tests
  selftests/bpf: Make ehash buckets configurable in socket iterator tests
  selftests/bpf: Allow for iteration over multiple states
  selftests/bpf: Allow for iteration over multiple ports
  selftests/bpf: Add tests for bucket resume logic in listening sockets
  bpf: tcp: Avoid socket skips and repeats during iteration
  bpf: tcp: Use bpf_tcp_iter_batch_item for bpf_tcp_iter_state batch items
  bpf: tcp: Get rid of st_bucket_done
  bpf: tcp: Make sure iter->batch always contains a full bucket snapshot
  bpf: tcp: Make mem flags configurable through bpf_iter_tcp_realloc_batch
====================

Link: https://patch.msgid.link/20250717191731.4142326-1-martin.lau@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-07-17 18:07:37 -07:00
4 changed files with 714 additions and 86 deletions
@@ -120,6 +120,39 @@ It is possible to query which kfunc the particular netdev implements via
netlink. See ``xdp-rx-metadata-features`` attribute set in
``Documentation/netlink/specs/netdev.yaml``.
Driver Implementation
=====================
Certain devices may prepend metadata to received packets. However, as of now,
``AF_XDP`` lacks the ability to communicate the size of the ``data_meta`` area
to the consumer. Therefore, it is the responsibility of the driver to copy any
device-reserved metadata out from the metadata area and ensure that
``xdp_buff->data_meta`` is pointing to ``xdp_buff->data`` before presenting the
frame to the XDP program. This is necessary so that, after the XDP program
adjusts the metadata area, the consumer can reliably retrieve the metadata
address using ``METADATA_SIZE`` offset.
The following diagram shows how custom metadata is positioned relative to the
packet data and how pointers are adjusted for metadata access::

                |<-- bpf_xdp_adjust_meta(xdp_buff, -METADATA_SIZE) --|
    new xdp_buff->data_meta                old xdp_buff->data_meta
                |                                                    |
                |                                       xdp_buff->data
                |                                                    |
   +----------+----------------------------------------------------+------+
   | headroom | custom metadata                                    | data |
   +----------+----------------------------------------------------+------+
                |                                                    |
                |                                       xdp_desc->addr
                |<------ xsk_umem__get_data() - METADATA_SIZE -------|
``bpf_xdp_adjust_meta`` ensures that ``METADATA_SIZE`` is aligned to 4 bytes,
does not exceed 252 bytes, and leaves sufficient space for building the
xdp_frame. If these conditions are not met, it returns a negative error. In this
case, the BPF program should not proceed to populate data into the ``data_meta``
area.
Example
=======
+202 -71
View File
@@ -58,6 +58,7 @@
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sock_diag.h>
#include <net/aligned_data.h>
#include <net/net_namespace.h>
@@ -3014,13 +3015,17 @@ out:
}
#ifdef CONFIG_BPF_SYSCALL
union bpf_tcp_iter_batch_item {
struct sock *sk;
__u64 cookie;
};
struct bpf_tcp_iter_state {
struct tcp_iter_state state;
unsigned int cur_sk;
unsigned int end_sk;
unsigned int max_sk;
struct sock **batch;
bool st_bucket_done;
union bpf_tcp_iter_batch_item *batch;
};
struct bpf_iter__tcp {
@@ -3043,21 +3048,32 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
{
while (iter->cur_sk < iter->end_sk)
sock_gen_put(iter->batch[iter->cur_sk++]);
union bpf_tcp_iter_batch_item *item;
unsigned int cur_sk = iter->cur_sk;
__u64 cookie;
/* Remember the cookies of the sockets we haven't seen yet, so we can
* pick up where we left off next time around.
*/
while (cur_sk < iter->end_sk) {
item = &iter->batch[cur_sk++];
cookie = sock_gen_cookie(item->sk);
sock_gen_put(item->sk);
item->cookie = cookie;
}
}
static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
unsigned int new_batch_sz)
unsigned int new_batch_sz, gfp_t flags)
{
struct sock **new_batch;
union bpf_tcp_iter_batch_item *new_batch;
new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
GFP_USER | __GFP_NOWARN);
flags | __GFP_NOWARN);
if (!new_batch)
return -ENOMEM;
bpf_iter_tcp_put_batch(iter);
memcpy(new_batch, iter->batch, sizeof(*iter->batch) * iter->end_sk);
kvfree(iter->batch);
iter->batch = new_batch;
iter->max_sk = new_batch_sz;
@@ -3065,112 +3081,234 @@ static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
return 0;
}
static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
struct sock *start_sk)
/* Scan the bucket list starting at @first_sk for the first socket whose
 * sk_cookie matches one of the @n_cookies cookies remembered by
 * bpf_iter_tcp_put_batch(). Cookies are tried in batch order, so iteration
 * resumes at the earliest not-yet-shown socket that still exists.
 *
 * Returns the matching socket, or NULL if every remembered socket has left
 * the bucket. Caller must hold the bucket lock.
 */
static struct sock *bpf_iter_tcp_resume_bucket(struct sock *first_sk,
					       union bpf_tcp_iter_batch_item *cookies,
					       int n_cookies)
{
	struct hlist_nulls_node *node;
	struct sock *sk;
	int i;

	for (i = 0; i < n_cookies; i++) {
		/* Restart from the head of the remaining list for each cookie. */
		sk = first_sk;
		sk_nulls_for_each_from(sk, node)
			if (cookies[i].cookie == atomic64_read(&sk->sk_cookie))
				return sk;
	}

	return NULL;
}
/* Resume iteration over the listening hash (lhash2).
 *
 * iter->cur_sk..iter->end_sk index the cookies saved for sockets we had
 * batched but not yet shown. If all saved cookies were consumed, move on to
 * the next bucket; otherwise try to find the resume point inside the bucket
 * we left off in. Returns the first socket to batch with its bucket lock
 * held, or NULL when the listening table is exhausted.
 */
static struct sock *bpf_iter_tcp_resume_listening(struct seq_file *seq)
{
	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
	struct bpf_tcp_iter_state *iter = seq->private;
	struct tcp_iter_state *st = &iter->state;
	unsigned int find_cookie = iter->cur_sk;
	unsigned int end_cookie = iter->end_sk;
	int resume_bucket = st->bucket;
	struct sock *sk;

	/* All previously-batched sockets were shown; advance to next bucket. */
	if (end_cookie && find_cookie == end_cookie)
		++st->bucket;

	sk = listening_get_first(seq);
	/* The cookie range has been consumed either way; reset the batch. */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	if (sk && st->bucket == resume_bucket && end_cookie) {
		sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie],
						end_cookie - find_cookie);
		if (!sk) {
			/* None of the remembered sockets remain; drop the
			 * bucket lock and continue with the next bucket.
			 */
			spin_unlock(&hinfo->lhash2[st->bucket].lock);
			++st->bucket;
			sk = listening_get_first(seq);
		}
	}

	return sk;
}
/* Resume iteration over the established hash (ehash).
 *
 * Mirrors bpf_iter_tcp_resume_listening(), but uses the ehash bucket
 * locks (BH-disabling spinlocks) and established_get_first(). Returns the
 * first socket to batch with its bucket lock held, or NULL when the
 * established table is exhausted.
 */
static struct sock *bpf_iter_tcp_resume_established(struct seq_file *seq)
{
	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
	struct bpf_tcp_iter_state *iter = seq->private;
	struct tcp_iter_state *st = &iter->state;
	unsigned int find_cookie = iter->cur_sk;
	unsigned int end_cookie = iter->end_sk;
	int resume_bucket = st->bucket;
	struct sock *sk;

	/* All previously-batched sockets were shown; advance to next bucket. */
	if (end_cookie && find_cookie == end_cookie)
		++st->bucket;

	sk = established_get_first(seq);
	/* The cookie range has been consumed either way; reset the batch. */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	if (sk && st->bucket == resume_bucket && end_cookie) {
		sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie],
						end_cookie - find_cookie);
		if (!sk) {
			/* None of the remembered sockets remain; drop the
			 * bucket lock and continue with the next bucket.
			 */
			spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
			++st->bucket;
			sk = established_get_first(seq);
		}
	}

	return sk;
}
/* Find the next socket to start batching from, resuming wherever the
 * previous batch left off. Walks the listening table first, then falls
 * through to the established table once listening buckets are exhausted.
 * Returns the socket with its bucket lock held, or NULL when iteration is
 * fully done.
 */
static struct sock *bpf_iter_tcp_resume(struct seq_file *seq)
{
	struct bpf_tcp_iter_state *iter = seq->private;
	struct tcp_iter_state *st = &iter->state;
	struct sock *sk = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		sk = bpf_iter_tcp_resume_listening(seq);
		if (sk)
			break;
		/* Listening table exhausted; restart at ehash bucket 0. */
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		fallthrough;
	case TCP_SEQ_STATE_ESTABLISHED:
		sk = bpf_iter_tcp_resume_established(seq);
		break;
	}

	return sk;
}
static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
struct sock **start_sk)
{
struct bpf_tcp_iter_state *iter = seq->private;
struct hlist_nulls_node *node;
unsigned int expected = 1;
struct sock *sk;
sock_hold(start_sk);
iter->batch[iter->end_sk++] = start_sk;
sock_hold(*start_sk);
iter->batch[iter->end_sk++].sk = *start_sk;
sk = sk_nulls_next(start_sk);
sk = sk_nulls_next(*start_sk);
*start_sk = NULL;
sk_nulls_for_each_from(sk, node) {
if (seq_sk_match(seq, sk)) {
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
iter->batch[iter->end_sk++] = sk;
iter->batch[iter->end_sk++].sk = sk;
} else if (!*start_sk) {
/* Remember where we left off. */
*start_sk = sk;
}
expected++;
}
}
spin_unlock(&hinfo->lhash2[st->bucket].lock);
return expected;
}
static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
struct sock *start_sk)
struct sock **start_sk)
{
struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
struct bpf_tcp_iter_state *iter = seq->private;
struct tcp_iter_state *st = &iter->state;
struct hlist_nulls_node *node;
unsigned int expected = 1;
struct sock *sk;
sock_hold(start_sk);
iter->batch[iter->end_sk++] = start_sk;
sock_hold(*start_sk);
iter->batch[iter->end_sk++].sk = *start_sk;
sk = sk_nulls_next(start_sk);
sk = sk_nulls_next(*start_sk);
*start_sk = NULL;
sk_nulls_for_each_from(sk, node) {
if (seq_sk_match(seq, sk)) {
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
iter->batch[iter->end_sk++] = sk;
iter->batch[iter->end_sk++].sk = sk;
} else if (!*start_sk) {
/* Remember where we left off. */
*start_sk = sk;
}
expected++;
}
}
spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
return expected;
}
static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
static unsigned int bpf_iter_fill_batch(struct seq_file *seq,
struct sock **start_sk)
{
struct bpf_tcp_iter_state *iter = seq->private;
struct tcp_iter_state *st = &iter->state;
if (st->state == TCP_SEQ_STATE_LISTENING)
return bpf_iter_tcp_listening_batch(seq, start_sk);
else
return bpf_iter_tcp_established_batch(seq, start_sk);
}
/* Release the lock of the bucket currently being batched, choosing the
 * listening (lhash2) or established (ehash) lock based on the iterator
 * state. Counterpart to the lock taken by listening_get_first() /
 * established_get_first().
 */
static void bpf_iter_tcp_unlock_bucket(struct seq_file *seq)
{
	struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
	struct bpf_tcp_iter_state *iter = seq->private;
	struct tcp_iter_state *st = &iter->state;

	if (st->state == TCP_SEQ_STATE_LISTENING)
		spin_unlock(&hinfo->lhash2[st->bucket].lock);
	else
		spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
}
static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
{
struct bpf_tcp_iter_state *iter = seq->private;
unsigned int expected;
bool resized = false;
struct sock *sk;
int err;
/* The st->bucket is done. Directly advance to the next
* bucket instead of having the tcp_seek_last_pos() to skip
* one by one in the current bucket and eventually find out
* it has to advance to the next bucket.
*/
if (iter->st_bucket_done) {
st->offset = 0;
st->bucket++;
if (st->state == TCP_SEQ_STATE_LISTENING &&
st->bucket > hinfo->lhash2_mask) {
st->state = TCP_SEQ_STATE_ESTABLISHED;
st->bucket = 0;
}
}
again:
/* Get a new batch */
iter->cur_sk = 0;
iter->end_sk = 0;
iter->st_bucket_done = false;
sk = tcp_seek_last_pos(seq);
sk = bpf_iter_tcp_resume(seq);
if (!sk)
return NULL; /* Done */
if (st->state == TCP_SEQ_STATE_LISTENING)
expected = bpf_iter_tcp_listening_batch(seq, sk);
else
expected = bpf_iter_tcp_established_batch(seq, sk);
expected = bpf_iter_fill_batch(seq, &sk);
if (likely(iter->end_sk == expected))
goto done;
if (iter->end_sk == expected) {
iter->st_bucket_done = true;
return sk;
/* Batch size was too small. */
bpf_iter_tcp_unlock_bucket(seq);
bpf_iter_tcp_put_batch(iter);
err = bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2,
GFP_USER);
if (err)
return ERR_PTR(err);
sk = bpf_iter_tcp_resume(seq);
if (!sk)
return NULL; /* Done */
expected = bpf_iter_fill_batch(seq, &sk);
if (likely(iter->end_sk == expected))
goto done;
/* Batch size was still too small. Hold onto the lock while we try
* again with a larger batch to make sure the current bucket's size
* does not change in the meantime.
*/
err = bpf_iter_tcp_realloc_batch(iter, expected, GFP_NOWAIT);
if (err) {
bpf_iter_tcp_unlock_bucket(seq);
return ERR_PTR(err);
}
if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
resized = true;
goto again;
}
return sk;
expected = bpf_iter_fill_batch(seq, &sk);
WARN_ON_ONCE(iter->end_sk != expected);
done:
bpf_iter_tcp_unlock_bucket(seq);
return iter->batch[0].sk;
}
static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3200,16 +3338,11 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
* meta.seq_num is used instead.
*/
st->num++;
/* Move st->offset to the next sk in the bucket such that
* the future start() will resume at st->offset in
* st->bucket. See tcp_seek_last_pos().
*/
st->offset++;
sock_gen_put(iter->batch[iter->cur_sk++]);
sock_gen_put(iter->batch[iter->cur_sk++].sk);
}
if (iter->cur_sk < iter->end_sk)
sk = iter->batch[iter->cur_sk];
sk = iter->batch[iter->cur_sk].sk;
else
sk = bpf_iter_tcp_batch(seq);
@@ -3275,10 +3408,8 @@ static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
(void)tcp_prog_seq_show(prog, &meta, v, 0);
}
if (iter->cur_sk < iter->end_sk) {
if (iter->cur_sk < iter->end_sk)
bpf_iter_tcp_put_batch(iter);
iter->st_bucket_done = false;
}
}
static const struct seq_operations bpf_iter_tcp_seq_ops = {
@@ -3596,7 +3727,7 @@ static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
if (err)
return err;
err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ, GFP_USER);
if (err) {
bpf_iter_fini_seq_net(priv_data);
return err;
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2024 Meta
#include <poll.h>
#include <test_progs.h>
#include "network_helpers.h"
#include "sock_iter_batch.skel.h"
#define TEST_NS "sock_iter_batch_netns"
#define TEST_CHILD_NS "sock_iter_batch_child_netns"
static const int init_batch_size = 16;
static const int nr_soreuse = 4;
@@ -118,6 +120,45 @@ done:
return nth_sock_idx;
}
/* Destroy the TCP socket referred to by @fd using the iter_tcp_destroy BPF
 * iterator program: the program matches on the socket's cookie, calls
 * bpf_sock_destroy() on it, and writes the cookie back through the seq
 * file so we can verify the right socket was destroyed. Closes @fd before
 * returning.
 */
static void destroy(int fd)
{
	struct sock_iter_batch *skel = NULL;
	__u64 cookie = socket_cookie(fd);
	struct bpf_link *link = NULL;
	int iter_fd = -1;
	int nread;
	__u64 out;

	skel = sock_iter_batch__open();
	if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open"))
		goto done;
	/* Tell the BPF program which socket to destroy (set pre-load so it
	 * lands in .rodata).
	 */
	skel->rodata->destroy_cookie = cookie;
	if (!ASSERT_OK(sock_iter_batch__load(skel), "sock_iter_batch__load"))
		goto done;
	link = bpf_program__attach_iter(skel->progs.iter_tcp_destroy, NULL);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
		goto done;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_OK_FD(iter_fd, "bpf_iter_create"))
		goto done;
	/* Delete matching socket. */
	nread = read(iter_fd, &out, sizeof(out));
	ASSERT_GE(nread, 0, "nread");
	/* If the iterator reported a destroyed socket, it must be ours. */
	if (nread)
		ASSERT_EQ(out, cookie, "cookie matches");
done:
	if (iter_fd >= 0)
		close(iter_fd);
	bpf_link__destroy(link);
	sock_iter_batch__destroy(skel);
	close(fd);
}
static int get_seen_count(int fd, struct sock_count counts[], int n)
{
__u64 cookie = socket_cookie(fd);
@@ -152,8 +193,71 @@ static void check_n_were_seen_once(int *fds, int fds_len, int n,
ASSERT_EQ(seen_once, n, "seen_once");
}
static int accept_from_one(struct pollfd *server_poll_fds,
int server_poll_fds_len)
{
static const int poll_timeout_ms = 5000; /* 5s */
int ret;
int i;
ret = poll(server_poll_fds, server_poll_fds_len, poll_timeout_ms);
if (!ASSERT_EQ(ret, 1, "poll"))
return -1;
for (i = 0; i < server_poll_fds_len; i++)
if (server_poll_fds[i].revents & POLLIN)
return accept(server_poll_fds[i].fd, NULL, NULL);
return -1;
}
/* Establish @nr_connects client connections to the servers listening on
 * @addr:@port and accept each one, producing nr_connects*2 established
 * sockets (client fd followed by its accepted peer fd, interleaved).
 *
 * Returns a malloc'd array of the 2*nr_connects fds, which the caller
 * frees with free_fds(), or NULL on failure (partially-created fds are
 * cleaned up here).
 */
static int *connect_to_server(int family, int sock_type, const char *addr,
			      __u16 port, int nr_connects, int *server_fds,
			      int server_fds_len)
{
	struct pollfd *server_poll_fds = NULL;
	int *established_socks = NULL;
	int i;

	server_poll_fds = calloc(server_fds_len, sizeof(*server_poll_fds));
	if (!ASSERT_OK_PTR(server_poll_fds, "server_poll_fds"))
		return NULL;
	for (i = 0; i < server_fds_len; i++) {
		server_poll_fds[i].fd = server_fds[i];
		server_poll_fds[i].events = POLLIN;
	}

	i = 0;

	/* Two fds per connection: the connecting and the accepted side. */
	established_socks = malloc(sizeof(*established_socks) * nr_connects*2);
	if (!ASSERT_OK_PTR(established_socks, "established_socks"))
		goto error;

	while (nr_connects--) {
		established_socks[i] = connect_to_addr_str(family, sock_type,
							   addr, port, NULL);
		if (!ASSERT_OK_FD(established_socks[i], "connect_to_addr_str"))
			goto error;
		i++;
		established_socks[i] = accept_from_one(server_poll_fds,
						       server_fds_len);
		if (!ASSERT_OK_FD(established_socks[i], "accept_from_one"))
			goto error;
		i++;
	}

	free(server_poll_fds);
	return established_socks;
error:
	/* i counts how many fds were successfully created so far. */
	free_fds(established_socks, i);
	free(server_poll_fds);
	return NULL;
}
static void remove_seen(int family, int sock_type, const char *addr, __u16 port,
int *socks, int socks_len, struct sock_count *counts,
int *socks, int socks_len, int *established_socks,
int established_socks_len, struct sock_count *counts,
int counts_len, struct bpf_link *link, int iter_fd)
{
int close_idx;
@@ -182,8 +286,46 @@ static void remove_seen(int family, int sock_type, const char *addr, __u16 port,
counts_len);
}
/* Test that iteration resumes correctly after an already-seen established
 * socket is destroyed mid-iteration: no remaining socket may be skipped
 * and none repeated.
 */
static void remove_seen_established(int family, int sock_type, const char *addr,
				    __u16 port, int *listen_socks,
				    int listen_socks_len, int *established_socks,
				    int established_socks_len,
				    struct sock_count *counts, int counts_len,
				    struct bpf_link *link, int iter_fd)
{
	int close_idx;

	/* Iterate through all listening sockets. */
	read_n(iter_fd, listen_socks_len, counts, counts_len);

	/* Make sure we saw all listening sockets exactly once. */
	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
			       counts, counts_len);

	/* Leave one established socket. */
	read_n(iter_fd, established_socks_len - 1, counts, counts_len);

	/* Close a socket we've already seen to remove it from the bucket. */
	close_idx = get_nth_socket(established_socks, established_socks_len,
				   link, listen_socks_len + 1);
	if (!ASSERT_GE(close_idx, 0, "close_idx"))
		return;
	destroy(established_socks[close_idx]);
	/* Mark the slot invalid so free_fds() skips it later. */
	established_socks[close_idx] = -1;

	/* Iterate through the rest of the sockets. */
	read_n(iter_fd, -1, counts, counts_len);

	/* Make sure the last socket wasn't skipped and that there were no
	 * repeats.
	 */
	check_n_were_seen_once(established_socks, established_socks_len,
			       established_socks_len - 1, counts, counts_len);
}
static void remove_unseen(int family, int sock_type, const char *addr,
__u16 port, int *socks, int socks_len,
int *established_socks, int established_socks_len,
struct sock_count *counts, int counts_len,
struct bpf_link *link, int iter_fd)
{
@@ -214,8 +356,54 @@ static void remove_unseen(int family, int sock_type, const char *addr,
counts_len);
}
/* Test that iteration resumes correctly after a not-yet-seen established
 * socket (the would-be resume point) is destroyed mid-iteration: the
 * iterator must skip past the stale cookie without skipping or repeating
 * any live socket.
 */
static void remove_unseen_established(int family, int sock_type,
				      const char *addr, __u16 port,
				      int *listen_socks, int listen_socks_len,
				      int *established_socks,
				      int established_socks_len,
				      struct sock_count *counts, int counts_len,
				      struct bpf_link *link, int iter_fd)
{
	int close_idx;

	/* Iterate through all listening sockets. */
	read_n(iter_fd, listen_socks_len, counts, counts_len);

	/* Make sure we saw all listening sockets exactly once. */
	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
			       counts, counts_len);

	/* Iterate through the first established socket. */
	read_n(iter_fd, 1, counts, counts_len);

	/* Make sure we saw one established socks. */
	check_n_were_seen_once(established_socks, established_socks_len, 1,
			       counts, counts_len);

	/* Close what would be the next socket in the bucket to exercise the
	 * condition where we need to skip past the first cookie we remembered.
	 */
	close_idx = get_nth_socket(established_socks, established_socks_len,
				   link, listen_socks_len + 1);
	if (!ASSERT_GE(close_idx, 0, "close_idx"))
		return;
	destroy(established_socks[close_idx]);
	/* Mark the slot invalid so free_fds() skips it later. */
	established_socks[close_idx] = -1;

	/* Iterate through the rest of the sockets. */
	read_n(iter_fd, -1, counts, counts_len);

	/* Make sure the remaining sockets were seen exactly once and that we
	 * didn't repeat the socket that was already seen.
	 */
	check_n_were_seen_once(established_socks, established_socks_len,
			       established_socks_len - 1, counts, counts_len);
}
static void remove_all(int family, int sock_type, const char *addr,
__u16 port, int *socks, int socks_len,
int *established_socks, int established_socks_len,
struct sock_count *counts, int counts_len,
struct bpf_link *link, int iter_fd)
{
@@ -242,8 +430,57 @@ static void remove_all(int family, int sock_type, const char *addr,
ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
}
/* Test that iteration terminates cleanly when every remaining established
 * socket is destroyed mid-iteration: all saved resume cookies become
 * stale, and the next read must return nothing rather than skip to an
 * unrelated socket or crash.
 */
static void remove_all_established(int family, int sock_type, const char *addr,
				   __u16 port, int *listen_socks,
				   int listen_socks_len, int *established_socks,
				   int established_socks_len,
				   struct sock_count *counts, int counts_len,
				   struct bpf_link *link, int iter_fd)
{
	int *close_idx = NULL;
	int i;

	/* Iterate through all listening sockets. */
	read_n(iter_fd, listen_socks_len, counts, counts_len);

	/* Make sure we saw all listening sockets exactly once. */
	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
			       counts, counts_len);

	/* Iterate through the first established socket. */
	read_n(iter_fd, 1, counts, counts_len);

	/* Make sure we saw one established socks. */
	check_n_were_seen_once(established_socks, established_socks_len, 1,
			       counts, counts_len);

	/* Close all remaining sockets to exhaust the list of saved cookies and
	 * exit without putting any sockets into the batch on the next read.
	 */
	close_idx = malloc(sizeof(int) * (established_socks_len - 1));
	if (!ASSERT_OK_PTR(close_idx, "close_idx malloc"))
		return;
	/* Resolve all indices first: destroying sockets as we go would
	 * change the bucket ordering that get_nth_socket() relies on.
	 */
	for (i = 0; i < established_socks_len - 1; i++) {
		close_idx[i] = get_nth_socket(established_socks,
					      established_socks_len, link,
					      listen_socks_len + i);
		/* Don't leak close_idx on failure (was a bare return). */
		if (!ASSERT_GE(close_idx[i], 0, "close_idx"))
			goto done;
	}
	for (i = 0; i < established_socks_len - 1; i++) {
		destroy(established_socks[close_idx[i]]);
		/* Mark the slot invalid so free_fds() skips it later. */
		established_socks[close_idx[i]] = -1;
	}

	/* Make sure there are no more sockets returned */
	ASSERT_EQ(read_n(iter_fd, -1, counts, counts_len), 0, "read_n");
done:
	free(close_idx);
}
static void add_some(int family, int sock_type, const char *addr, __u16 port,
int *socks, int socks_len, struct sock_count *counts,
int *socks, int socks_len, int *established_socks,
int established_socks_len, struct sock_count *counts,
int counts_len, struct bpf_link *link, int iter_fd)
{
int *new_socks = NULL;
@@ -271,8 +508,52 @@ done:
free_fds(new_socks, socks_len);
}
/* Test that sockets added to the bucket mid-iteration do not cause the
 * original established sockets to be skipped or repeated; the new sockets
 * themselves may or may not be seen.
 */
static void add_some_established(int family, int sock_type, const char *addr,
				 __u16 port, int *listen_socks,
				 int listen_socks_len, int *established_socks,
				 int established_socks_len,
				 struct sock_count *counts,
				 int counts_len, struct bpf_link *link,
				 int iter_fd)
{
	int *new_socks = NULL;

	/* Iterate through all listening sockets. */
	read_n(iter_fd, listen_socks_len, counts, counts_len);

	/* Make sure we saw all listening sockets exactly once. */
	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
			       counts, counts_len);

	/* Iterate through the first established_socks_len - 1 sockets. */
	read_n(iter_fd, established_socks_len - 1, counts, counts_len);

	/* Make sure we saw established_socks_len - 1 sockets exactly once. */
	check_n_were_seen_once(established_socks, established_socks_len,
			       established_socks_len - 1, counts, counts_len);

	/* Double the number of established sockets in the bucket.
	 * (established_socks_len / 2 connections yield that many more
	 * client/accepted fd pairs.)
	 */
	new_socks = connect_to_server(family, sock_type, addr, port,
				      established_socks_len / 2, listen_socks,
				      listen_socks_len);
	if (!ASSERT_OK_PTR(new_socks, "connect_to_server"))
		goto done;

	/* Iterate through the rest of the sockets. */
	read_n(iter_fd, -1, counts, counts_len);

	/* Make sure each of the original sockets was seen exactly once. */
	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
			       counts, counts_len);
	check_n_were_seen_once(established_socks, established_socks_len,
			       established_socks_len, counts, counts_len);
done:
	free_fds(new_socks, established_socks_len);
}
static void force_realloc(int family, int sock_type, const char *addr,
__u16 port, int *socks, int socks_len,
int *established_socks, int established_socks_len,
struct sock_count *counts, int counts_len,
struct bpf_link *link, int iter_fd)
{
@@ -299,11 +580,32 @@ done:
free_fds(new_socks, socks_len);
}
/* Test that a batch realloc (bucket larger than the current batch
 * capacity) does not cause any socket to be skipped or repeated: a single
 * full iteration must see every listening and established socket exactly
 * once.
 */
static void force_realloc_established(int family, int sock_type,
				      const char *addr, __u16 port,
				      int *listen_socks, int listen_socks_len,
				      int *established_socks,
				      int established_socks_len,
				      struct sock_count *counts, int counts_len,
				      struct bpf_link *link, int iter_fd)
{
	/* Iterate through all sockets to trigger a realloc. */
	read_n(iter_fd, -1, counts, counts_len);

	/* Make sure each socket was seen exactly once. */
	check_n_were_seen_once(listen_socks, listen_socks_len, listen_socks_len,
			       counts, counts_len);
	check_n_were_seen_once(established_socks, established_socks_len,
			       established_socks_len, counts, counts_len);
}
struct test_case {
void (*test)(int family, int sock_type, const char *addr, __u16 port,
int *socks, int socks_len, struct sock_count *counts,
int *socks, int socks_len, int *established_socks,
int established_socks_len, struct sock_count *counts,
int counts_len, struct bpf_link *link, int iter_fd);
const char *description;
int ehash_buckets;
int connections;
int init_socks;
int max_socks;
int sock_type;
@@ -358,18 +660,140 @@ static struct test_case resume_tests[] = {
.family = AF_INET6,
.test = force_realloc,
},
{
.description = "tcp: resume after removing a seen socket (listening)",
.init_socks = nr_soreuse,
.max_socks = nr_soreuse,
.sock_type = SOCK_STREAM,
.family = AF_INET6,
.test = remove_seen,
},
{
.description = "tcp: resume after removing one unseen socket (listening)",
.init_socks = nr_soreuse,
.max_socks = nr_soreuse,
.sock_type = SOCK_STREAM,
.family = AF_INET6,
.test = remove_unseen,
},
{
.description = "tcp: resume after removing all unseen sockets (listening)",
.init_socks = nr_soreuse,
.max_socks = nr_soreuse,
.sock_type = SOCK_STREAM,
.family = AF_INET6,
.test = remove_all,
},
{
.description = "tcp: resume after adding a few sockets (listening)",
.init_socks = nr_soreuse,
.max_socks = nr_soreuse,
.sock_type = SOCK_STREAM,
/* Use AF_INET so that new sockets are added to the head of the
* bucket's list.
*/
.family = AF_INET,
.test = add_some,
},
{
.description = "tcp: force a realloc to occur (listening)",
.init_socks = init_batch_size,
.max_socks = init_batch_size * 2,
.sock_type = SOCK_STREAM,
/* Use AF_INET6 so that new sockets are added to the tail of the
* bucket's list, needing to be added to the next batch to force
* a realloc.
*/
.family = AF_INET6,
.test = force_realloc,
},
{
.description = "tcp: resume after removing a seen socket (established)",
/* Force all established sockets into one bucket */
.ehash_buckets = 1,
.connections = nr_soreuse,
.init_socks = nr_soreuse,
/* Room for connect()ed and accept()ed sockets */
.max_socks = nr_soreuse * 3,
.sock_type = SOCK_STREAM,
.family = AF_INET6,
.test = remove_seen_established,
},
{
.description = "tcp: resume after removing one unseen socket (established)",
/* Force all established sockets into one bucket */
.ehash_buckets = 1,
.connections = nr_soreuse,
.init_socks = nr_soreuse,
/* Room for connect()ed and accept()ed sockets */
.max_socks = nr_soreuse * 3,
.sock_type = SOCK_STREAM,
.family = AF_INET6,
.test = remove_unseen_established,
},
{
.description = "tcp: resume after removing all unseen sockets (established)",
/* Force all established sockets into one bucket */
.ehash_buckets = 1,
.connections = nr_soreuse,
.init_socks = nr_soreuse,
/* Room for connect()ed and accept()ed sockets */
.max_socks = nr_soreuse * 3,
.sock_type = SOCK_STREAM,
.family = AF_INET6,
.test = remove_all_established,
},
{
.description = "tcp: resume after adding a few sockets (established)",
/* Force all established sockets into one bucket */
.ehash_buckets = 1,
.connections = nr_soreuse,
.init_socks = nr_soreuse,
/* Room for connect()ed and accept()ed sockets */
.max_socks = nr_soreuse * 3,
.sock_type = SOCK_STREAM,
.family = AF_INET6,
.test = add_some_established,
},
{
.description = "tcp: force a realloc to occur (established)",
/* Force all established sockets into one bucket */
.ehash_buckets = 1,
/* Bucket size will need to double when going from listening to
* established sockets.
*/
.connections = init_batch_size,
.init_socks = nr_soreuse,
/* Room for connect()ed and accept()ed sockets */
.max_socks = nr_soreuse + (init_batch_size * 2),
.sock_type = SOCK_STREAM,
.family = AF_INET6,
.test = force_realloc_established,
},
};
static void do_resume_test(struct test_case *tc)
{
struct sock_iter_batch *skel = NULL;
struct sock_count *counts = NULL;
static const __u16 port = 10001;
struct nstoken *nstoken = NULL;
struct bpf_link *link = NULL;
struct sock_count *counts;
int *established_fds = NULL;
int err, iter_fd = -1;
const char *addr;
int *fds = NULL;
int local_port;
if (tc->ehash_buckets) {
SYS_NOFAIL("ip netns del " TEST_CHILD_NS);
SYS(done, "sysctl -wq net.ipv4.tcp_child_ehash_entries=%d",
tc->ehash_buckets);
SYS(done, "ip netns add %s", TEST_CHILD_NS);
SYS(done, "ip -net %s link set dev lo up", TEST_CHILD_NS);
nstoken = open_netns(TEST_CHILD_NS);
if (!ASSERT_OK_PTR(nstoken, "open_child_netns"))
goto done;
}
counts = calloc(tc->max_socks, sizeof(*counts));
if (!ASSERT_OK_PTR(counts, "counts"))
@@ -384,11 +808,18 @@ static void do_resume_test(struct test_case *tc)
tc->init_socks);
if (!ASSERT_OK_PTR(fds, "start_reuseport_server"))
goto done;
local_port = get_socket_local_port(*fds);
if (!ASSERT_GE(local_port, 0, "get_socket_local_port"))
goto done;
skel->rodata->ports[0] = ntohs(local_port);
if (tc->connections) {
established_fds = connect_to_server(tc->family, tc->sock_type,
addr, port,
tc->connections, fds,
tc->init_socks);
if (!ASSERT_OK_PTR(established_fds, "connect_to_server"))
goto done;
}
skel->rodata->ports[0] = 0;
skel->rodata->ports[1] = 0;
skel->rodata->sf = tc->family;
skel->rodata->ss = 0;
err = sock_iter_batch__load(skel);
if (!ASSERT_OK(err, "sock_iter_batch__load"))
@@ -406,10 +837,15 @@ static void do_resume_test(struct test_case *tc)
goto done;
tc->test(tc->family, tc->sock_type, addr, port, fds, tc->init_socks,
counts, tc->max_socks, link, iter_fd);
established_fds, tc->connections*2, counts, tc->max_socks,
link, iter_fd);
done:
close_netns(nstoken);
SYS_NOFAIL("ip netns del " TEST_CHILD_NS);
SYS_NOFAIL("sysctl -w net.ipv4.tcp_child_ehash_entries=0");
free(counts);
free_fds(fds, tc->init_socks);
free_fds(established_fds, tc->connections*2);
if (iter_fd >= 0)
close(iter_fd);
bpf_link__destroy(link);
@@ -454,6 +890,8 @@ static void do_test(int sock_type, bool onebyone)
skel->rodata->ports[i] = ntohs(local_port);
}
skel->rodata->sf = AF_INET6;
if (sock_type == SOCK_STREAM)
skel->rodata->ss = TCP_LISTEN;
err = sock_iter_batch__load(skel);
if (!ASSERT_OK(err, "sock_iter_batch__load"))
@@ -23,6 +23,7 @@ static bool ipv4_addr_loopback(__be32 a)
}
volatile const unsigned int sf;
volatile const unsigned int ss;
volatile const __u16 ports[2];
unsigned int bucket[2];
@@ -42,16 +43,18 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
sock_cookie = bpf_get_socket_cookie(sk);
sk = bpf_core_cast(sk, struct sock);
if (sk->sk_family != sf ||
sk->sk_state != TCP_LISTEN ||
sk->sk_family == AF_INET6 ?
(ss && sk->sk_state != ss) ||
(sk->sk_family == AF_INET6 ?
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) :
!ipv4_addr_loopback(sk->sk_rcv_saddr))
!ipv4_addr_loopback(sk->sk_rcv_saddr)))
return 0;
if (sk->sk_num == ports[0])
idx = 0;
else if (sk->sk_num == ports[1])
idx = 1;
else if (!ports[0] && !ports[1])
idx = 0;
else
return 0;
@@ -67,6 +70,27 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
return 0;
}
/* Cookie of the one socket this iterator program should destroy; set by
 * userspace before load (lives in .rodata).
 */
volatile const __u64 destroy_cookie;

SEC("iter/tcp")
/* TCP iterator program: destroy the socket whose cookie matches
 * destroy_cookie and report that cookie back through the seq file so
 * userspace can confirm which socket was destroyed.
 */
int iter_tcp_destroy(struct bpf_iter__tcp *ctx)
{
	struct sock_common *sk_common = (struct sock_common *)ctx->sk_common;
	__u64 sock_cookie;

	/* The iterator passes a NULL socket at end of iteration. */
	if (!sk_common)
		return 0;

	sock_cookie = bpf_get_socket_cookie(sk_common);
	if (sock_cookie != destroy_cookie)
		return 0;

	bpf_sock_destroy(sk_common);
	/* Echo the cookie to userspace via the seq_file. */
	bpf_seq_write(ctx->meta->seq, &sock_cookie, sizeof(sock_cookie));

	return 0;
}
#define udp_sk(ptr) container_of(ptr, struct udp_sock, inet.sk)
SEC("iter/udp")
@@ -83,15 +107,17 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx)
sock_cookie = bpf_get_socket_cookie(sk);
sk = bpf_core_cast(sk, struct sock);
if (sk->sk_family != sf ||
sk->sk_family == AF_INET6 ?
(sk->sk_family == AF_INET6 ?
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr) :
!ipv4_addr_loopback(sk->sk_rcv_saddr))
!ipv4_addr_loopback(sk->sk_rcv_saddr)))
return 0;
if (sk->sk_num == ports[0])
idx = 0;
else if (sk->sk_num == ports[1])
idx = 1;
else if (!ports[0] && !ports[1])
idx = 0;
else
return 0;