net/sched: tbf: correct backlog statistic for GSO packets
[ Upstream commit 1596a135e3 ]

When the length of a GSO packet in the tbf qdisc is larger than the
configured burst size, the packet is segmented by the tbf_segment()
function. Whenever this function is used to enqueue SKBs, the backlog
statistic of the tbf is not increased correctly. This can lead to
underflows of the 'backlog' byte-statistic value when these packets
are dequeued from tbf.

Reproduce the bug:
Ensure that the sender machine has GSO enabled. Configure the tbf on
the outgoing interface of the machine as follows (burst size = 1 MTU):

$ tc qdisc add dev <oif> root handle 1: tbf rate 50Mbit burst 1514 latency 50ms

Send bulk TCP traffic out via this interface, e.g., by running an
iPerf3 client on this machine. Check the qdisc statistics:

$ tc -s qdisc show dev <oif>

The 'backlog' byte-statistic has incorrect values while traffic is
transferred, e.g., high values due to u32 underflows. When the
transfer is stopped, the value is != 0, which should never happen.

This patch fixes this bug by updating the statistics correctly, even
if single SKBs of a GSO SKB cannot be enqueued.

Fixes: e43ac79a4b ("sch_tbf: segment too big GSO packets")
Signed-off-by: Martin Ottens <martin.ottens@fau.de>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20241125174608.1484356-1-martin.ottens@fau.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Committed by: Greg Kroah-Hartman
Parent: 70966e5baf
Commit: d381c2b0d7
Diffstat: +12 -6
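The "high values" in the backlog statistic are a classic u32
wrap-around. As a minimal userspace sketch of the symptom (the
variable below merely mirrors the sch->qstats.backlog counter and is
not kernel code), decrementing a counter that was never incremented
wraps to a value near 2^32:

	#include <stdio.h>

	/* Sketch of the symptom: the backlog statistic is a u32 that the
	 * dequeue path decrements for every segment, but the pre-fix
	 * enqueue path never incremented it for GSO segments, so the
	 * counter wraps around to a huge value.
	 */
	int main(void)
	{
		unsigned int backlog = 0; /* mirrors sch->qstats.backlog */

		backlog -= 1514; /* dequeue of one never-accounted segment */

		printf("backlog = %u\n", backlog); /* prints 4294965782 */
		return 0;
	}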
net/sched/sch_tbf.c

@@ -208,7 +208,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *segs, *nskb;
 	netdev_features_t features = netif_skb_features(skb);
-	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
+	unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len;
 	int ret, nb;
 
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -219,21 +219,27 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
 	nb = 0;
 	skb_list_walk_safe(segs, segs, nskb) {
 		skb_mark_not_on_list(segs);
-		qdisc_skb_cb(segs)->pkt_len = segs->len;
-		len += segs->len;
+		seg_len = segs->len;
+		qdisc_skb_cb(segs)->pkt_len = seg_len;
 		ret = qdisc_enqueue(segs, q->qdisc, to_free);
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
 				qdisc_qstats_drop(sch);
 		} else {
 			nb++;
+			len += seg_len;
 		}
 	}
 	sch->q.qlen += nb;
-	if (nb > 1)
+	sch->qstats.backlog += len;
+	if (nb > 0) {
 		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
-	consume_skb(skb);
-	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+		consume_skb(skb);
+		return NET_XMIT_SUCCESS;
+	}
+
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
 }
 
 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
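To see why the fix counts len only over successfully enqueued
segments and adds it to the qdisc's own backlog, here is a hedged
userspace simulation of the enqueue/dequeue accounting (stub
constants and a made-up drop pattern, assuming the last of three
segments is rejected by the child qdisc; no kernel APIs are used):

	#include <stdio.h>

	#define SEGS    3     /* GSO packet segmented into 3 SKBs */
	#define SEG_LEN 1514  /* bytes per segment */

	int main(void)
	{
		unsigned int backlog_old = 0; /* tbf backlog, pre-fix  */
		unsigned int backlog_new = 0; /* tbf backlog, post-fix */
		unsigned int len = 0;
		int nb = 0, i;

		/* Enqueue path: the last segment is dropped by the child. */
		for (i = 0; i < SEGS; i++) {
			if (i < SEGS - 1) {
				nb++;
				len += SEG_LEN; /* post-fix: count successes only */
			}
		}

		/* Pre-fix code never incremented the tbf's own backlog here;
		 * the fix adds the equivalent of sch->qstats.backlog += len;
		 */
		backlog_new += len;

		/* Dequeue path decrements the backlog for every segment that
		 * actually sits in the queue (nb of them).
		 */
		for (i = 0; i < nb; i++) {
			backlog_old -= SEG_LEN;
			backlog_new -= SEG_LEN;
		}

		printf("old: %u (u32 underflow)\n", backlog_old); /* 4294964268 */
		printf("new: %u (correct)\n", backlog_new);       /* 0 */
		return 0;
	}

The restructured tail of tbf_segment() follows the same logic: if at
least one segment was enqueued (nb > 0), the original GSO skb was
consumed successfully, so consume_skb() and NET_XMIT_SUCCESS; if every
segment was dropped, kfree_skb() and NET_XMIT_DROP.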