Merge branch 'virtio-fixes-for-tx-ring-sizing-and-resize-error-reporting'
Laurent Vivier says: ==================== virtio: Fixes for TX ring sizing and resize error reporting This patch series contains two fixes and a cleanup for the virtio subsystem. The first patch fixes an error reporting bug in virtio_ring's virtqueue_resize() function. Previously, errors from internal resize helpers could be masked if the subsequent re-enabling of the virtqueue succeeded. This patch restores the correct error propagation, ensuring that callers of virtqueue_resize() are properly informed of underlying resize failures. The second patch cleans up the use of '2 + MAX_SKB_FRAGS'. The third patch addresses a reliability issue in virtio_net where the TX ring size could be configured too small, potentially leading to persistently stopped queues and degraded performance. It enforces a minimum TX ring size to ensure there's always enough space for at least one maximally-fragmented packet plus an additional slot. ==================== Link: https://patch.msgid.link/20250521092236.661410-1-lvivier@redhat.com Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
@@ -1104,7 +1104,7 @@ static bool tx_may_stop(struct virtnet_info *vi,
|
||||
* Since most packets only take 1 or 2 ring slots, stopping the queue
|
||||
* early means 16 slots are typically wasted.
|
||||
*/
|
||||
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
|
||||
if (sq->vq->num_free < MAX_SKB_FRAGS + 2) {
|
||||
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
|
||||
|
||||
netif_tx_stop_queue(txq);
|
||||
@@ -1136,7 +1136,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
|
||||
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
|
||||
/* More just got used, free them then recheck. */
|
||||
free_old_xmit(sq, txq, false);
|
||||
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
|
||||
if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
|
||||
netif_start_subqueue(dev, qnum);
|
||||
u64_stats_update_begin(&sq->stats.syncp);
|
||||
u64_stats_inc(&sq->stats.wake);
|
||||
@@ -3021,7 +3021,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
|
||||
free_old_xmit(sq, txq, !!budget);
|
||||
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
|
||||
|
||||
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
|
||||
if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
|
||||
if (netif_tx_queue_stopped(txq)) {
|
||||
u64_stats_update_begin(&sq->stats.syncp);
|
||||
u64_stats_inc(&sq->stats.wake);
|
||||
@@ -3218,7 +3218,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
|
||||
else
|
||||
free_old_xmit(sq, txq, !!budget);
|
||||
|
||||
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
|
||||
if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
|
||||
if (netif_tx_queue_stopped(txq)) {
|
||||
u64_stats_update_begin(&sq->stats.syncp);
|
||||
u64_stats_inc(&sq->stats.wake);
|
||||
@@ -3504,6 +3504,12 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
|
||||
{
|
||||
int qindex, err;
|
||||
|
||||
if (ring_num <= MAX_SKB_FRAGS + 2) {
|
||||
netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n",
|
||||
ring_num, MAX_SKB_FRAGS + 2);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qindex = sq - vi->sq;
|
||||
|
||||
virtnet_tx_pause(vi, sq);
|
||||
|
||||
@@ -2797,7 +2797,7 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
|
||||
void (*recycle_done)(struct virtqueue *vq))
|
||||
{
|
||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||
int err;
|
||||
int err, err_reset;
|
||||
|
||||
if (num > vq->vq.num_max)
|
||||
return -E2BIG;
|
||||
@@ -2819,7 +2819,11 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
|
||||
else
|
||||
err = virtqueue_resize_split(_vq, num);
|
||||
|
||||
return virtqueue_enable_after_reset(_vq);
|
||||
err_reset = virtqueue_enable_after_reset(_vq);
|
||||
if (err_reset)
|
||||
return err_reset;
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_resize);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user