net: mana: Add standard counter rx_missed_errors

Report the standard counter stats->rx_missed_errors using the
hc_rx_discards_no_wqe value queried from the hardware.
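
For context, rx_missed_errors is a standard field of struct
rtnl_link_stats64 filled in from the driver's .ndo_get_stats64
callback, so the hardware discard count becomes visible through the
usual interfaces such as
/sys/class/net/<ifname>/statistics/rx_missed_errors or
"ip -s -s link show". An abbreviated sketch of the mapping
(everything except the new assignment is elided from
mana_get_stats64):

	static void mana_get_stats64(struct net_device *ndev,
				     struct rtnl_link_stats64 *st)
	{
		struct mana_port_context *apc = netdev_priv(ndev);

		/* hc_rx_discards_no_wqe counts frames the NIC dropped
		 * because no receive WQE was posted; report it as the
		 * standard rx_missed_errors counter.
		 */
		st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
	}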

Add a delayed work item, scheduled on the system workqueue, that
runs mana_query_gf_stats every 2 seconds so the ethtool stats
always reflect the latest hardware values, and define a driver
capability flag to notify the hardware of the periodic queries.
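
The periodic query uses the usual self-rescheduling delayed_work
pattern, and the new capability bit
(GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY, advertised through
GDMA_DRV_CAP_FLAGS1) lets the hardware know these HWC requests will
arrive regularly. Condensed from the hunks below, the lifecycle is:

	/* mana_probe(): arm the periodic query */
	INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);

	/* mana_gf_stats_work_handler(): requeues itself every MANA_GF_STATS_PERIOD (2 * HZ) */
	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);

	/* mana_remove(): stop the periodic query before teardown */
	cancel_delayed_work_sync(&ac->gf_stats_work);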

To avoid repeated failures and log flooding, the work is not
rescheduled if mana_query_gf_stats fails with an HWC timeout;
in that case the cached stats are reset to 0. Other errors are
transient and do not require a VF reset to recover, so the work
keeps being rescheduled after them.
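
In the work handler this works out to: bail out without
rescheduling on -ETIMEDOUT, record the timeout, and zero the cached
stats so stale values are not reported; any other result simply
requeues the work. A trimmed-down version of the handler from the
hunk below:

	static void mana_gf_stats_work_handler(struct work_struct *work)
	{
		struct mana_context *ac =
			container_of(to_delayed_work(work), struct mana_context,
				     gf_stats_work);

		/* An HWC timeout needs a VF reset to recover; stop polling
		 * and clear the now-stale cached stats.
		 */
		if (mana_query_gf_stats(ac) == -ETIMEDOUT) {
			ac->hwc_timeout_occurred = true;
			memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
			return;
		}

		/* Transient errors: keep polling every 2 seconds. */
		schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
	}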

Signed-off-by: Erni Sri Satya Vennela <ernis@linux.microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Link: https://patch.msgid.link/1763120599-6331-3-git-send-email-ernis@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -534,6 +534,11 @@ static void mana_get_stats64(struct net_device *ndev,
 	netdev_stats_to_stats64(st, &ndev->stats);
+	if (apc->ac->hwc_timeout_occurred)
+		netdev_warn_once(ndev, "HWC timeout occurred\n");
+
+	st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
+
 	for (q = 0; q < num_queues; q++) {
 		rx_stats = &apc->rxqs[q]->stats;
@@ -2809,7 +2814,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
 	return 0;
 }
 
-void mana_query_gf_stats(struct mana_context *ac)
+int mana_query_gf_stats(struct mana_context *ac)
 {
 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 	struct mana_query_gf_stat_resp resp = {};
@@ -2852,14 +2857,14 @@ void mana_query_gf_stats(struct mana_context *ac)
 				sizeof(resp));
 	if (err) {
 		dev_err(dev, "Failed to query GF stats: %d\n", err);
-		return;
+		return err;
 	}
 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
 				   sizeof(resp));
 	if (err || resp.hdr.status) {
 		dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
 			resp.hdr.status);
-		return;
+		return err;
 	}
 
 	ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
@@ -2894,6 +2899,8 @@ void mana_query_gf_stats(struct mana_context *ac)
 	ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
 	ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
 	ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+
+	return 0;
 }
 
 void mana_query_phy_stats(struct mana_port_context *apc)
@@ -3428,6 +3435,24 @@ int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event
 	return 0;
 }
 
+#define MANA_GF_STATS_PERIOD (2 * HZ)
+
+static void mana_gf_stats_work_handler(struct work_struct *work)
+{
+	struct mana_context *ac =
+		container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+	int err;
+
+	err = mana_query_gf_stats(ac);
+	if (err == -ETIMEDOUT) {
+		/* HWC timeout detected - reset stats and stop rescheduling */
+		ac->hwc_timeout_occurred = true;
+		memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+		return;
+	}
+
+	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+}
+
 int mana_probe(struct gdma_dev *gd, bool resuming)
 {
 	struct gdma_context *gc = gd->gdma_context;
@@ -3520,6 +3545,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 	}
 
 	err = add_adev(gd, "eth");
+
+	INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
+	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+
 out:
 	if (err) {
 		mana_remove(gd, false);
@@ -3544,6 +3573,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
 	int i;
 
 	disable_work_sync(&ac->link_change_work);
+	cancel_delayed_work_sync(&ac->gf_stats_work);
 
 	/* adev currently doesn't support suspending, always remove it */
 	if (gd->adev)
@@ -213,8 +213,6 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 	if (!apc->port_is_up)
 		return;
 
-	/* we call mana function to update stats from GDMA */
-	mana_query_gf_stats(apc->ac);
 
 	/* We call this mana function to get the phy stats from GDMA and includes
 	 * aggregate tx/rx drop counters, Per-TC(Traffic Channel) tx/rx and pause
@@ -592,6 +592,9 @@ enum {
 #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
 #define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
 
+/* Driver can send HWC periodically to query stats */
+#define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)
+
 #define GDMA_DRV_CAP_FLAGS1 \
 	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
 	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
@@ -601,7 +604,8 @@ enum {
 	 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
 	 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
 	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
-	 GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE)
+	 GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
+	 GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY)
 
 #define GDMA_DRV_CAP_FLAGS2 0
@@ -480,6 +480,10 @@ struct mana_context {
 	struct mana_eq *eqs;
 	struct dentry *mana_eqs_debugfs;
 
+	/* Workqueue for querying hardware stats */
+	struct delayed_work gf_stats_work;
+	bool hwc_timeout_occurred;
+
 	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
 
 	/* Link state change work */
@@ -581,7 +585,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
 struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
 void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
 int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
-void mana_query_gf_stats(struct mana_context *ac);
+int mana_query_gf_stats(struct mana_context *ac);
 int mana_query_link_cfg(struct mana_port_context *apc);
 int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
 		      int enable_clamping);