net: __alloc_skb() cleanup

This patch refactors __alloc_skb() to prepare for the following one,
and does not change functionality.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20251116202717.1542829-3-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author:    Eric Dumazet
Date:      2025-11-16 20:27:16 +00:00
Committer: Jakub Kicinski
Commit:    294e638259 (parent dac0236075)
Diffstat:  +18 -10
@@ -646,25 +646,33 @@ out:
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                             int flags, int node)
 {
+        struct sk_buff *skb = NULL;
         struct kmem_cache *cache;
-        struct sk_buff *skb;
         bool pfmemalloc;
         u8 *data;
 
-        cache = (flags & SKB_ALLOC_FCLONE)
-                ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
-
         if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                 gfp_mask |= __GFP_MEMALLOC;
 
         /* Get the HEAD */
-        if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
-            likely(node == NUMA_NO_NODE || node == numa_mem_id()))
+        if (flags & SKB_ALLOC_FCLONE) {
+                cache = net_hotdata.skbuff_fclone_cache;
+                goto fallback;
+        }
+        cache = net_hotdata.skbuff_cache;
+        if (unlikely(node != NUMA_NO_NODE && node != numa_mem_id()))
+                goto fallback;
+
+        if (flags & SKB_ALLOC_NAPI) {
                 skb = napi_skb_cache_get(true);
-        else
+                if (unlikely(!skb))
+                        return NULL;
+        }
+        if (!skb) {
+fallback:
                 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
-        if (unlikely(!skb))
-                return NULL;
+                if (unlikely(!skb))
+                        return NULL;
+        }
         prefetchw(skb);
 
         /* We do our best to align skb_shared_info on a separate cache
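
For readers who want to trace the reshaped branch structure outside the
kernel, here is a minimal userspace sketch. Everything in it is invented
for illustration (the ALLOC_* flag values, NO_NODE, local_node(), and the
mock_* helpers are stand-ins, not kernel APIs); only the shape of the
control flow mirrors the patch: fclone and remote-node requests jump
straight to the slab fallback, SKB_ALLOC_NAPI requests try the per-CPU
cache first and fail hard on a miss, and everything else reaches the
fallback through the final !skb test.

/* Userspace sketch of the control flow above; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_FCLONE 0x1   /* stand-in for SKB_ALLOC_FCLONE */
#define ALLOC_NAPI   0x2   /* stand-in for SKB_ALLOC_NAPI */
#define NO_NODE      (-1)  /* stand-in for NUMA_NO_NODE */

static int local_node(void) { return 0; }  /* stand-in for numa_mem_id() */

/* Per-CPU cache stub: pretend it occasionally misses. */
static void *mock_napi_cache_get(void)
{
        return rand() % 4 ? malloc(1) : NULL;
}

/* Slab stub: the single "fallback" allocation site. */
static void *mock_slab_alloc(const char *cache, int node)
{
        printf("fallback: cache=%s node=%d\n", cache, node);
        return malloc(1);
}

static void *mock_alloc_skb(int flags, int node)
{
        void *skb = NULL;
        const char *cache;

        /* fclone allocations never use the per-CPU cache. */
        if (flags & ALLOC_FCLONE) {
                cache = "fclone_cache";
                goto fallback;
        }
        cache = "skbuff_cache";

        /* A remote-node request also bypasses the per-CPU cache. */
        if (node != NO_NODE && node != local_node())
                goto fallback;

        if (flags & ALLOC_NAPI) {
                skb = mock_napi_cache_get();
                if (!skb)       /* cache miss: fail, as before the patch */
                        return NULL;
        }
        if (!skb) {
fallback:
                skb = mock_slab_alloc(cache, node);
                if (!skb)
                        return NULL;
        }
        return skb;
}

int main(void)
{
        free(mock_alloc_skb(ALLOC_NAPI, NO_NODE));   /* per-CPU cache path */
        free(mock_alloc_skb(ALLOC_FCLONE, NO_NODE)); /* always fallback */
        free(mock_alloc_skb(0, 1));                  /* remote node: fallback */
        return 0;
}

The goto into the !skb block is the same trick the patch uses: it keeps
the slab allocation and its NULL check in exactly one place while three
different early branches can reach it, which is what lets the follow-up
patch touch that single site.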