Merge branch 'mlx5-misx-fixes-2025-08-20'

Mark Bloch says:

====================
mlx5 misc fixes 2025-08-20

This patchset provides misc bug fixes from the team to the mlx5
core and Eth drivers.

v1: https://lore.kernel.org/1755095476-414026-1-git-send-email-tariqt@nvidia.com
====================

Link: https://patch.msgid.link/20250820133209.389065-1-mbloch@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-08-21 07:58:35 -07:00
8 changed files with 140 additions and 105 deletions
@@ -26,7 +26,6 @@ struct mlx5e_dcbx {
u8 cap;
/* Buffer configuration */
bool manual_buffer;
u32 cable_len;
u32 xoff;
u16 port_buff_cell_sz;
@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
/* Total shared buffer size is split in a ratio of 3:1 between
* lossy and lossless pools respectively.
*/
lossy_epool_size = (shared_buffer_size / 4) * 3;
lossless_ipool_size = shared_buffer_size / 4;
lossy_epool_size = shared_buffer_size - lossless_ipool_size;
mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
lossy_epool_size);
@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
u32 new_headroom_size = 0;
u32 current_headroom_size;
u32 current_headroom_cells = 0;
u32 new_headroom_cells = 0;
void *in;
int err;
int i;
current_headroom_size = port_buffer->headroom_size;
in = kzalloc(sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
u64 size = port_buffer->buffer[i].size;
u64 xoff = port_buffer->buffer[i].xoff;
u64 xon = port_buffer->buffer[i].xon;
new_headroom_size += size;
do_div(size, port_buff_cell_sz);
new_headroom_cells += size;
do_div(xoff, port_buff_cell_sz);
do_div(xon, port_buff_cell_sz);
MLX5_SET(bufferx_reg, buffer, size, size);
@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
new_headroom_size /= port_buff_cell_sz;
current_headroom_size /= port_buff_cell_sz;
err = port_update_shared_buffer(priv->mdev, current_headroom_size,
new_headroom_size);
err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
new_headroom_cells);
if (err)
goto out;
@@ -362,6 +362,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 old_cable_len = priv->dcbx.cable_len;
@@ -389,7 +390,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (MLX5_BUFFER_SUPPORTED(mdev)) {
pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
if (priv->dcbx.manual_buffer)
ret = mlx5_query_port_buffer_ownership(mdev,
&buffer_ownership);
if (ret)
netdev_err(dev,
"%s, Failed to get buffer ownership: %d\n",
__func__, ret);
if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
ret = mlx5e_port_manual_buffer_config(priv, changed,
dev->mtu, &pfc_new,
NULL, NULL);
@@ -982,7 +990,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
if (!changed)
return 0;
priv->dcbx.manual_buffer = true;
err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
buffer_size, prio2buffer);
return err;
@@ -1252,7 +1259,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
mlx5e_ets_init(priv);
@@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
vport_num - 1, external);
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
vport_num - 1, false);
vport_num - base_vport, false);
}
}
@@ -102,6 +102,8 @@ struct mlx5_esw_sched_node {
u8 level;
/* Valid only when this node represents a traffic class. */
u8 tc;
/* Valid only for a TC arbiter node or vport TC arbiter. */
u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
@@ -462,6 +464,7 @@ static int
esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *parent = vport_node->parent;
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = vport_node->esw->dev;
void *attr;
@@ -477,7 +480,7 @@ esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
vport_node->parent->ix);
parent ? parent->ix : vport_node->esw->qos.root_tsar_ix);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
vport_node->max_rate);
@@ -608,10 +611,7 @@ static void
esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
u32 *tc_bw)
{
struct mlx5_esw_sched_node *vports_tc_node;
list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry)
tc_bw[vports_tc_node->tc] = vports_tc_node->bw_share;
memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw));
}
static void
@@ -628,6 +628,7 @@ esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
u8 tc = vports_tc_node->tc;
u32 bw_share;
tc_arbiter_node->tc_bw[tc] = tc_bw[tc];
bw_share = tc_bw[tc] * fw_max_bw_share;
bw_share = esw_qos_calc_bw_share(bw_share, divider,
fw_max_bw_share);
@@ -786,48 +787,15 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
return err;
}
if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack);
} else {
/* The eswitch doesn't support scheduling nodes.
* Create a software-only node0 using the root TSAR to attach vport QoS to.
*/
if (!__esw_qos_alloc_node(esw,
esw->qos.root_tsar_ix,
SCHED_NODE_TYPE_VPORTS_TSAR,
NULL))
esw->qos.node0 = ERR_PTR(-ENOMEM);
else
list_add_tail(&esw->qos.node0->entry,
&esw->qos.domain->nodes);
}
if (IS_ERR(esw->qos.node0)) {
err = PTR_ERR(esw->qos.node0);
esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err);
goto err_node0;
}
refcount_set(&esw->qos.refcnt, 1);
return 0;
err_node0:
if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix))
esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
return err;
}
static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
int err;
if (esw->qos.node0->ix != esw->qos.root_tsar_ix)
__esw_qos_destroy_node(esw->qos.node0, NULL);
else
__esw_qos_free_node(esw->qos.node0);
esw->qos.node0 = NULL;
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix);
@@ -990,13 +958,16 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
int err, new_level, max_level;
struct mlx5_esw_sched_node *parent = vport_node->parent;
int err;
if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
int new_level, max_level;
/* Increase the parent's level by 2 to account for both the
* TC arbiter and the vports TC scheduling element.
*/
new_level = vport_node->parent->level + 2;
new_level = (parent ? parent->level : 2) + 2;
max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
log_esw_max_sched_depth);
if (new_level > max_level) {
@@ -1033,9 +1004,7 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
err_sched_nodes:
if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
esw_qos_node_destroy_sched_element(vport_node, NULL);
list_add_tail(&vport_node->entry,
&vport_node->parent->children);
vport_node->level = vport_node->parent->level + 1;
esw_qos_node_attach_to_parent(vport_node);
} else {
esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL);
}
@@ -1083,7 +1052,6 @@ err_out:
static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
struct mlx5_esw_sched_node *parent = vport_node->parent;
enum sched_node_type curr_type = vport_node->type;
if (curr_type == SCHED_NODE_TYPE_VPORT)
@@ -1092,8 +1060,9 @@ static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_a
esw_qos_vport_tc_disable(vport, extack);
vport_node->bw_share = 0;
memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw));
list_del_init(&vport_node->entry);
esw_qos_normalize_min_rate(parent->esw, parent, extack);
esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack);
trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
}
@@ -1103,25 +1072,23 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
int err;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
esw_qos_node_set_parent(vport->qos.sched_node, parent);
if (type == SCHED_NODE_TYPE_VPORT) {
err = esw_qos_vport_create_sched_element(vport->qos.sched_node,
extack);
} else {
esw_qos_node_set_parent(vport_node, parent);
if (type == SCHED_NODE_TYPE_VPORT)
err = esw_qos_vport_create_sched_element(vport_node, extack);
else
err = esw_qos_vport_tc_enable(vport, type, extack);
}
if (err)
return err;
vport->qos.sched_node->type = type;
esw_qos_normalize_min_rate(parent->esw, parent, extack);
trace_mlx5_esw_vport_qos_create(vport->dev, vport,
vport->qos.sched_node->max_rate,
vport->qos.sched_node->bw_share);
vport_node->type = type;
esw_qos_normalize_min_rate(vport_node->esw, parent, extack);
trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate,
vport_node->bw_share);
return 0;
}
@@ -1132,6 +1099,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
struct mlx5_esw_sched_node *sched_node;
struct mlx5_eswitch *parent_esw;
int err;
esw_assert_qos_lock_held(esw);
@@ -1139,10 +1107,14 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
if (err)
return err;
parent = parent ?: esw->qos.node0;
sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent);
if (!sched_node)
parent_esw = parent ? parent->esw : esw;
sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent);
if (!sched_node) {
esw_qos_put(esw);
return -ENOMEM;
}
if (!parent)
list_add_tail(&sched_node->entry, &esw->qos.domain->nodes);
sched_node->max_rate = max_rate;
sched_node->min_rate = min_rate;
@@ -1150,6 +1122,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, type, parent, extack);
if (err) {
__esw_qos_free_node(sched_node);
esw_qos_put(esw);
vport->qos.sched_node = NULL;
}
@@ -1157,6 +1130,19 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
return err;
}
/* Tear down QoS state for @vport. Caller must already hold the eswitch
 * QoS lock (asserted below). No-op when QoS was never enabled on the
 * vport (no scheduling node attached).
 */
static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
esw_assert_qos_lock_held(esw);
/* Nothing to disable if the vport has no QoS scheduling node. */
if (!vport->qos.sched_node)
return;
esw_qos_vport_disable(vport, NULL);
mlx5_esw_qos_vport_qos_free(vport);
/* NOTE(review): presumably drops the QoS reference taken at enable
 * time — confirm against esw_qos_get()/esw_qos_put() pairing.
 */
esw_qos_put(esw);
}
void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
@@ -1168,11 +1154,9 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
goto unlock;
parent = vport->qos.sched_node->parent;
WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node");
WARN(parent, "Disabling QoS on port before detaching it from node");
esw_qos_vport_disable(vport, NULL);
mlx5_esw_qos_vport_qos_free(vport);
esw_qos_put(esw);
mlx5_esw_qos_vport_disable_locked(vport);
unlock:
esw_qos_unlock(esw);
}
@@ -1262,13 +1246,13 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *curr_parent = vport->qos.sched_node->parent;
enum sched_node_type curr_type = vport->qos.sched_node->type;
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
struct mlx5_esw_sched_node *curr_parent = vport_node->parent;
enum sched_node_type curr_type = vport_node->type;
u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
int err;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
parent = parent ?: curr_parent;
if (curr_type == type && curr_parent == parent)
return 0;
@@ -1276,10 +1260,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
if (err)
return err;
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
esw_qos_tc_arbiter_get_bw_shares(vport->qos.sched_node,
curr_tc_bw);
}
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw);
esw_qos_vport_disable(vport, extack);
@@ -1290,8 +1272,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
}
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
esw_qos_set_tc_arbiter_bw_shares(vport->qos.sched_node,
curr_tc_bw, extack);
esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw,
extack);
}
return err;
@@ -1306,16 +1288,16 @@ static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw
esw_assert_qos_lock_held(esw);
curr_parent = vport->qos.sched_node->parent;
parent = parent ?: esw->qos.node0;
if (curr_parent == parent)
return 0;
/* Set vport QoS type based on parent node type if different from
* default QoS; otherwise, use the vport's current QoS type.
*/
if (parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
type = SCHED_NODE_TYPE_RATE_LIMITER;
else if (curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
else if (curr_parent &&
curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
type = SCHED_NODE_TYPE_VPORT;
else
type = vport->qos.sched_node->type;
@@ -1654,9 +1636,10 @@ static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
u32 *tc_bw)
{
struct mlx5_eswitch *esw = vport->qos.sched_node ?
vport->qos.sched_node->parent->esw :
vport->dev->priv.eswitch;
struct mlx5_esw_sched_node *node = vport->qos.sched_node;
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
esw = (node && node->parent) ? node->parent->esw : esw;
return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
}
@@ -1673,6 +1656,21 @@ static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
return true;
}
/* Release a vport's QoS scheduling node once it carries no configuration
 * at all: no parent node, no max/min rate, and no per-TC bandwidth
 * shares. Caller must hold the eswitch QoS lock (asserted below).
 */
static void esw_vport_qos_prune_empty(struct mlx5_vport *vport)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
/* QoS not enabled on this vport — nothing to prune. */
if (!vport_node)
return;
/* Keep the node while any setting (parent, rate limits, or TC
 * bandwidth) is still in effect.
 */
if (vport_node->parent || vport_node->max_rate ||
vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw))
return;
mlx5_esw_qos_vport_disable_locked(vport);
}
int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
{
if (esw->qos.domain)
@@ -1706,6 +1704,10 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
if (err)
goto out;
esw_vport_qos_prune_empty(vport);
out:
esw_qos_unlock(esw);
return err;
}
@@ -1727,6 +1729,10 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
if (err)
goto out;
esw_vport_qos_prune_empty(vport);
out:
esw_qos_unlock(esw);
return err;
}
@@ -1763,7 +1769,8 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
if (disable) {
if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
NULL, extack);
vport_node->parent, extack);
esw_vport_qos_prune_empty(vport);
goto unlock;
}
@@ -1775,7 +1782,7 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
} else {
err = esw_qos_vport_update(vport,
SCHED_NODE_TYPE_TC_ARBITER_TSAR,
NULL, extack);
vport_node->parent, extack);
}
if (!err)
esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);
@@ -1924,14 +1931,20 @@ int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
void *priv, void *parent_priv,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *node;
struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL;
struct mlx5_vport *vport = priv;
int err;
if (!parent)
return mlx5_esw_qos_vport_update_parent(vport, NULL, extack);
err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
if (!err) {
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
node = parent_priv;
return mlx5_esw_qos_vport_update_parent(vport, node, extack);
esw_qos_lock(esw);
esw_vport_qos_prune_empty(vport);
esw_qos_unlock(esw);
}
return err;
}
static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
@@ -373,11 +373,6 @@ struct mlx5_eswitch {
refcount_t refcnt;
u32 root_tsar_ix;
struct mlx5_qos_domain *domain;
/* Contains all vports with QoS enabled but no explicit node.
* Cannot be NULL if QoS is enabled, but may be a fake node
* referencing the root TSAR if the esw doesn't support nodes.
*/
struct mlx5_esw_sched_node *node0;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
@@ -367,6 +367,8 @@ int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
u8 *buffer_ownership);
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
@@ -968,6 +968,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
return err;
}
/* Query which entity owns the port buffer configuration via the PFCC
 * register's buf_ownership field (e.g. MLX5_BUF_OWNERSHIP_SW_OWNED).
 * If the device lacks the PCAM buffer_ownership capability, report
 * MLX5_BUF_OWNERSHIP_UNKNOWN and return 0 — absence of the feature is
 * not treated as an error. Otherwise returns 0 on success or the error
 * code from the PFCC register query.
 */
int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
u8 *buffer_ownership)
{
u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
int err;
/* Capability gate: firmware without this feature cannot report
 * ownership, so fall back to "unknown" rather than failing.
 */
if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
*buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
return 0;
}
err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
if (err)
return err;
*buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
return 0;
}
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
{
int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);