|
|
|
@@ -102,6 +102,8 @@ struct mlx5_esw_sched_node {
|
|
|
|
|
u8 level;
|
|
|
|
|
/* Valid only when this node represents a traffic class. */
|
|
|
|
|
u8 tc;
|
|
|
|
|
/* Valid only for a TC arbiter node or vport TC arbiter. */
|
|
|
|
|
u32 tc_bw[DEVLINK_RATE_TCS_MAX];
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
|
|
|
|
@@ -462,6 +464,7 @@ static int
|
|
|
|
|
esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_esw_sched_node *parent = vport_node->parent;
|
|
|
|
|
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
|
|
|
|
|
struct mlx5_core_dev *dev = vport_node->esw->dev;
|
|
|
|
|
void *attr;
|
|
|
|
@@ -477,7 +480,7 @@ esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
|
|
|
|
|
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
|
|
|
|
|
MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
|
|
|
|
|
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
|
|
|
|
|
vport_node->parent->ix);
|
|
|
|
|
parent ? parent->ix : vport_node->esw->qos.root_tsar_ix);
|
|
|
|
|
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
|
|
|
|
|
vport_node->max_rate);
|
|
|
|
|
|
|
|
|
@@ -608,10 +611,7 @@ static void
|
|
|
|
|
esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
|
|
|
|
|
u32 *tc_bw)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_esw_sched_node *vports_tc_node;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry)
|
|
|
|
|
tc_bw[vports_tc_node->tc] = vports_tc_node->bw_share;
|
|
|
|
|
memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
@@ -628,6 +628,7 @@ esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
|
|
|
|
|
u8 tc = vports_tc_node->tc;
|
|
|
|
|
u32 bw_share;
|
|
|
|
|
|
|
|
|
|
tc_arbiter_node->tc_bw[tc] = tc_bw[tc];
|
|
|
|
|
bw_share = tc_bw[tc] * fw_max_bw_share;
|
|
|
|
|
bw_share = esw_qos_calc_bw_share(bw_share, divider,
|
|
|
|
|
fw_max_bw_share);
|
|
|
|
@@ -786,48 +787,15 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
|
|
|
|
|
esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack);
|
|
|
|
|
} else {
|
|
|
|
|
/* The eswitch doesn't support scheduling nodes.
|
|
|
|
|
* Create a software-only node0 using the root TSAR to attach vport QoS to.
|
|
|
|
|
*/
|
|
|
|
|
if (!__esw_qos_alloc_node(esw,
|
|
|
|
|
esw->qos.root_tsar_ix,
|
|
|
|
|
SCHED_NODE_TYPE_VPORTS_TSAR,
|
|
|
|
|
NULL))
|
|
|
|
|
esw->qos.node0 = ERR_PTR(-ENOMEM);
|
|
|
|
|
else
|
|
|
|
|
list_add_tail(&esw->qos.node0->entry,
|
|
|
|
|
&esw->qos.domain->nodes);
|
|
|
|
|
}
|
|
|
|
|
if (IS_ERR(esw->qos.node0)) {
|
|
|
|
|
err = PTR_ERR(esw->qos.node0);
|
|
|
|
|
esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err);
|
|
|
|
|
goto err_node0;
|
|
|
|
|
}
|
|
|
|
|
refcount_set(&esw->qos.refcnt, 1);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
err_node0:
|
|
|
|
|
if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
|
|
|
|
|
esw->qos.root_tsar_ix))
|
|
|
|
|
esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
|
|
|
|
|
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void esw_qos_destroy(struct mlx5_eswitch *esw)
|
|
|
|
|
{
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
if (esw->qos.node0->ix != esw->qos.root_tsar_ix)
|
|
|
|
|
__esw_qos_destroy_node(esw->qos.node0, NULL);
|
|
|
|
|
else
|
|
|
|
|
__esw_qos_free_node(esw->qos.node0);
|
|
|
|
|
esw->qos.node0 = NULL;
|
|
|
|
|
|
|
|
|
|
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
|
|
|
|
|
SCHEDULING_HIERARCHY_E_SWITCH,
|
|
|
|
|
esw->qos.root_tsar_ix);
|
|
|
|
@@ -990,13 +958,16 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
|
|
|
|
|
int err, new_level, max_level;
|
|
|
|
|
struct mlx5_esw_sched_node *parent = vport_node->parent;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
|
|
|
|
|
int new_level, max_level;
|
|
|
|
|
|
|
|
|
|
/* Increase the parent's level by 2 to account for both the
|
|
|
|
|
* TC arbiter and the vports TC scheduling element.
|
|
|
|
|
*/
|
|
|
|
|
new_level = vport_node->parent->level + 2;
|
|
|
|
|
new_level = (parent ? parent->level : 2) + 2;
|
|
|
|
|
max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
|
|
|
|
|
log_esw_max_sched_depth);
|
|
|
|
|
if (new_level > max_level) {
|
|
|
|
@@ -1033,9 +1004,7 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
|
|
|
|
|
err_sched_nodes:
|
|
|
|
|
if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
|
|
|
|
|
esw_qos_node_destroy_sched_element(vport_node, NULL);
|
|
|
|
|
list_add_tail(&vport_node->entry,
|
|
|
|
|
&vport_node->parent->children);
|
|
|
|
|
vport_node->level = vport_node->parent->level + 1;
|
|
|
|
|
esw_qos_node_attach_to_parent(vport_node);
|
|
|
|
|
} else {
|
|
|
|
|
esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL);
|
|
|
|
|
}
|
|
|
|
@@ -1083,7 +1052,6 @@ err_out:
|
|
|
|
|
static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
|
|
|
|
|
struct mlx5_esw_sched_node *parent = vport_node->parent;
|
|
|
|
|
enum sched_node_type curr_type = vport_node->type;
|
|
|
|
|
|
|
|
|
|
if (curr_type == SCHED_NODE_TYPE_VPORT)
|
|
|
|
@@ -1092,8 +1060,9 @@ static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_a
|
|
|
|
|
esw_qos_vport_tc_disable(vport, extack);
|
|
|
|
|
|
|
|
|
|
vport_node->bw_share = 0;
|
|
|
|
|
memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw));
|
|
|
|
|
list_del_init(&vport_node->entry);
|
|
|
|
|
esw_qos_normalize_min_rate(parent->esw, parent, extack);
|
|
|
|
|
esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack);
|
|
|
|
|
|
|
|
|
|
trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
|
|
|
|
|
}
|
|
|
|
@@ -1103,25 +1072,23 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport,
|
|
|
|
|
struct mlx5_esw_sched_node *parent,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
|
|
|
|
|
|
|
|
|
|
esw_qos_node_set_parent(vport->qos.sched_node, parent);
|
|
|
|
|
if (type == SCHED_NODE_TYPE_VPORT) {
|
|
|
|
|
err = esw_qos_vport_create_sched_element(vport->qos.sched_node,
|
|
|
|
|
extack);
|
|
|
|
|
} else {
|
|
|
|
|
esw_qos_node_set_parent(vport_node, parent);
|
|
|
|
|
if (type == SCHED_NODE_TYPE_VPORT)
|
|
|
|
|
err = esw_qos_vport_create_sched_element(vport_node, extack);
|
|
|
|
|
else
|
|
|
|
|
err = esw_qos_vport_tc_enable(vport, type, extack);
|
|
|
|
|
}
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
vport->qos.sched_node->type = type;
|
|
|
|
|
esw_qos_normalize_min_rate(parent->esw, parent, extack);
|
|
|
|
|
trace_mlx5_esw_vport_qos_create(vport->dev, vport,
|
|
|
|
|
vport->qos.sched_node->max_rate,
|
|
|
|
|
vport->qos.sched_node->bw_share);
|
|
|
|
|
vport_node->type = type;
|
|
|
|
|
esw_qos_normalize_min_rate(vport_node->esw, parent, extack);
|
|
|
|
|
trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate,
|
|
|
|
|
vport_node->bw_share);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
@@ -1132,6 +1099,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
|
|
|
|
|
struct mlx5_esw_sched_node *sched_node;
|
|
|
|
|
struct mlx5_eswitch *parent_esw;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
esw_assert_qos_lock_held(esw);
|
|
|
|
@@ -1139,10 +1107,14 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
parent = parent ?: esw->qos.node0;
|
|
|
|
|
sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent);
|
|
|
|
|
if (!sched_node)
|
|
|
|
|
parent_esw = parent ? parent->esw : esw;
|
|
|
|
|
sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent);
|
|
|
|
|
if (!sched_node) {
|
|
|
|
|
esw_qos_put(esw);
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
}
|
|
|
|
|
if (!parent)
|
|
|
|
|
list_add_tail(&sched_node->entry, &esw->qos.domain->nodes);
|
|
|
|
|
|
|
|
|
|
sched_node->max_rate = max_rate;
|
|
|
|
|
sched_node->min_rate = min_rate;
|
|
|
|
@@ -1150,6 +1122,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
|
|
|
|
|
vport->qos.sched_node = sched_node;
|
|
|
|
|
err = esw_qos_vport_enable(vport, type, parent, extack);
|
|
|
|
|
if (err) {
|
|
|
|
|
__esw_qos_free_node(sched_node);
|
|
|
|
|
esw_qos_put(esw);
|
|
|
|
|
vport->qos.sched_node = NULL;
|
|
|
|
|
}
|
|
|
|
@@ -1157,6 +1130,19 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Tear down QoS on a vport; the caller already holds the esw QoS lock.
 * A no-op when QoS was never enabled on this vport.
 */
static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport)
{
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	esw_assert_qos_lock_held(esw);
	/* No scheduling node means QoS is not enabled — nothing to do. */
	if (!vport->qos.sched_node)
		return;

	/* Detach the vport from the scheduling hierarchy, free its QoS
	 * state, then drop the eswitch QoS reference taken at enable time.
	 */
	esw_qos_vport_disable(vport, NULL);
	mlx5_esw_qos_vport_qos_free(vport);
	esw_qos_put(esw);
}
|
|
|
|
|
|
|
|
|
|
void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
|
|
|
|
@@ -1168,11 +1154,9 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
|
|
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
|
|
parent = vport->qos.sched_node->parent;
|
|
|
|
|
WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node");
|
|
|
|
|
WARN(parent, "Disabling QoS on port before detaching it from node");
|
|
|
|
|
|
|
|
|
|
esw_qos_vport_disable(vport, NULL);
|
|
|
|
|
mlx5_esw_qos_vport_qos_free(vport);
|
|
|
|
|
esw_qos_put(esw);
|
|
|
|
|
mlx5_esw_qos_vport_disable_locked(vport);
|
|
|
|
|
unlock:
|
|
|
|
|
esw_qos_unlock(esw);
|
|
|
|
|
}
|
|
|
|
@@ -1262,13 +1246,13 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
|
|
|
|
|
struct mlx5_esw_sched_node *parent,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_esw_sched_node *curr_parent = vport->qos.sched_node->parent;
|
|
|
|
|
enum sched_node_type curr_type = vport->qos.sched_node->type;
|
|
|
|
|
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
|
|
|
|
|
struct mlx5_esw_sched_node *curr_parent = vport_node->parent;
|
|
|
|
|
enum sched_node_type curr_type = vport_node->type;
|
|
|
|
|
u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
|
|
|
|
|
parent = parent ?: curr_parent;
|
|
|
|
|
if (curr_type == type && curr_parent == parent)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
@@ -1276,10 +1260,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
|
|
|
|
|
esw_qos_tc_arbiter_get_bw_shares(vport->qos.sched_node,
|
|
|
|
|
curr_tc_bw);
|
|
|
|
|
}
|
|
|
|
|
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
|
|
|
|
|
esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw);
|
|
|
|
|
|
|
|
|
|
esw_qos_vport_disable(vport, extack);
|
|
|
|
|
|
|
|
|
@@ -1290,8 +1272,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
|
|
|
|
|
esw_qos_set_tc_arbiter_bw_shares(vport->qos.sched_node,
|
|
|
|
|
curr_tc_bw, extack);
|
|
|
|
|
esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw,
|
|
|
|
|
extack);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return err;
|
|
|
|
@@ -1306,16 +1288,16 @@ static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw
|
|
|
|
|
|
|
|
|
|
esw_assert_qos_lock_held(esw);
|
|
|
|
|
curr_parent = vport->qos.sched_node->parent;
|
|
|
|
|
parent = parent ?: esw->qos.node0;
|
|
|
|
|
if (curr_parent == parent)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
/* Set vport QoS type based on parent node type if different from
|
|
|
|
|
* default QoS; otherwise, use the vport's current QoS type.
|
|
|
|
|
*/
|
|
|
|
|
if (parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
|
|
|
|
|
if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
|
|
|
|
|
type = SCHED_NODE_TYPE_RATE_LIMITER;
|
|
|
|
|
else if (curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
|
|
|
|
|
else if (curr_parent &&
|
|
|
|
|
curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
|
|
|
|
|
type = SCHED_NODE_TYPE_VPORT;
|
|
|
|
|
else
|
|
|
|
|
type = vport->qos.sched_node->type;
|
|
|
|
@@ -1654,9 +1636,10 @@ static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
|
|
|
|
|
static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
|
|
|
|
|
u32 *tc_bw)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_eswitch *esw = vport->qos.sched_node ?
|
|
|
|
|
vport->qos.sched_node->parent->esw :
|
|
|
|
|
vport->dev->priv.eswitch;
|
|
|
|
|
struct mlx5_esw_sched_node *node = vport->qos.sched_node;
|
|
|
|
|
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
|
|
|
|
|
|
|
|
|
|
esw = (node && node->parent) ? node->parent->esw : esw;
|
|
|
|
|
|
|
|
|
|
return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
|
|
|
|
|
}
|
|
|
|
@@ -1673,6 +1656,21 @@ static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Release a vport's QoS scheduling node once no QoS setting remains
 * configured on it, so an unconfigured vport carries no QoS state.
 * Caller must hold the esw QoS lock.
 */
static void esw_vport_qos_prune_empty(struct mlx5_vport *vport)
{
	struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;

	esw_assert_qos_lock_held(vport->dev->priv.eswitch);
	/* QoS not enabled on this vport — nothing to prune. */
	if (!vport_node)
		return;

	/* Keep the node while any QoS configuration is still in effect:
	 * an explicit parent, a max or min rate, or per-TC bandwidth shares.
	 */
	if (vport_node->parent || vport_node->max_rate ||
	    vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw))
		return;

	/* Everything is back at defaults; drop the vport's QoS state. */
	mlx5_esw_qos_vport_disable_locked(vport);
}
|
|
|
|
|
|
|
|
|
|
int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
|
|
|
|
|
{
|
|
|
|
|
if (esw->qos.domain)
|
|
|
|
@@ -1706,6 +1704,10 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
|
|
|
|
|
|
|
|
|
|
esw_qos_lock(esw);
|
|
|
|
|
err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out;
|
|
|
|
|
esw_vport_qos_prune_empty(vport);
|
|
|
|
|
out:
|
|
|
|
|
esw_qos_unlock(esw);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
@@ -1727,6 +1729,10 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
|
|
|
|
|
|
|
|
|
|
esw_qos_lock(esw);
|
|
|
|
|
err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out;
|
|
|
|
|
esw_vport_qos_prune_empty(vport);
|
|
|
|
|
out:
|
|
|
|
|
esw_qos_unlock(esw);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
@@ -1763,7 +1769,8 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
|
|
|
|
|
if (disable) {
|
|
|
|
|
if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
|
|
|
|
|
err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
|
|
|
|
|
NULL, extack);
|
|
|
|
|
vport_node->parent, extack);
|
|
|
|
|
esw_vport_qos_prune_empty(vport);
|
|
|
|
|
goto unlock;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
@@ -1775,7 +1782,7 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
|
|
|
|
|
} else {
|
|
|
|
|
err = esw_qos_vport_update(vport,
|
|
|
|
|
SCHED_NODE_TYPE_TC_ARBITER_TSAR,
|
|
|
|
|
NULL, extack);
|
|
|
|
|
vport_node->parent, extack);
|
|
|
|
|
}
|
|
|
|
|
if (!err)
|
|
|
|
|
esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);
|
|
|
|
@@ -1924,14 +1931,20 @@ int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
|
|
|
|
|
void *priv, void *parent_priv,
|
|
|
|
|
struct netlink_ext_ack *extack)
|
|
|
|
|
{
|
|
|
|
|
struct mlx5_esw_sched_node *node;
|
|
|
|
|
struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL;
|
|
|
|
|
struct mlx5_vport *vport = priv;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
if (!parent)
|
|
|
|
|
return mlx5_esw_qos_vport_update_parent(vport, NULL, extack);
|
|
|
|
|
err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
|
|
|
|
|
if (!err) {
|
|
|
|
|
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
|
|
|
|
|
|
|
|
|
|
node = parent_priv;
|
|
|
|
|
return mlx5_esw_qos_vport_update_parent(vport, node, extack);
|
|
|
|
|
esw_qos_lock(esw);
|
|
|
|
|
esw_vport_qos_prune_empty(vport);
|
|
|
|
|
esw_qos_unlock(esw);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
|
|
|
|
|