Merge branch 'net-mlx5-hws-set-of-fixes-and-adjustments'

Tariq Toukan says:

====================
net/mlx5: HWS, set of fixes and adjustments

This patch series by Yevgeny and Vlad introduces a set of steering fixes
and adjustments.
====================

Link: https://patch.msgid.link/1747766802-958178-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-05-21 20:44:24 -07:00
12 changed files with 230 additions and 97 deletions
@@ -527,7 +527,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
int reformat_id = 0;
u32 reformat_id = 0;
unsigned int inlen;
int dst_cnt_size;
u32 *in, action;
@@ -580,23 +580,21 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, action, action);
if (!extended_dest && fte->act_dests.action.pkt_reformat) {
struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
struct mlx5_pkt_reformat *pkt_reformat =
fte->act_dests.action.pkt_reformat;
if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
if (reformat_id < 0) {
mlx5_core_err(dev,
"Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
pkt_reformat->reformat_type);
err = reformat_id;
goto err_out;
}
} else {
reformat_id = fte->act_dests.action.pkt_reformat->id;
err = mlx5_fs_get_packet_reformat_id(pkt_reformat,
&reformat_id);
if (err) {
mlx5_core_err(dev,
"Unsupported pkt_reformat type (%d)\n",
pkt_reformat->reformat_type);
goto err_out;
}
}
MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
reformat_id);
if (fte->act_dests.action.modify_hdr) {
if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
@@ -1830,14 +1830,35 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
/* Resolve the FW-visible packet reformat object id for @pkt_reformat,
 * dispatching on which steering backend owns the object.
 * Returns 0 and writes the id through @id, or a negative errno on failure.
 */
int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
u32 *id)
{
switch (pkt_reformat->owner) {
case MLX5_FLOW_RESOURCE_OWNER_FW:
/* FW-owned objects carry the FW id directly. */
*id = pkt_reformat->id;
return 0;
case MLX5_FLOW_RESOURCE_OWNER_SW:
/* SW (DR) steering: id is looked up from the DR action. */
return mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, id);
case MLX5_FLOW_RESOURCE_OWNER_HWS:
/* HW steering: may lazily allocate a FW object to obtain an id. */
return mlx5_fs_hws_action_get_pkt_reformat_id(pkt_reformat, id);
default:
return -EINVAL;
}
}
static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
struct mlx5_pkt_reformat *p2)
{
return p1->owner == p2->owner &&
(p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
p1->id == p2->id :
mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
mlx5_fs_dr_action_get_pkt_reformat_id(p2));
int err1, err2;
u32 id1, id2;
if (p1->owner != p2->owner)
return false;
err1 = mlx5_fs_get_packet_reformat_id(p1, &id1);
err2 = mlx5_fs_get_packet_reformat_id(p2, &id2);
return !err1 && !err2 && id1 == id2;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
@@ -58,6 +58,7 @@ struct mlx5_flow_definer {
enum mlx5_flow_resource_owner {
MLX5_FLOW_RESOURCE_OWNER_FW,
MLX5_FLOW_RESOURCE_OWNER_SW,
MLX5_FLOW_RESOURCE_OWNER_HWS,
};
struct mlx5_modify_hdr {
@@ -386,6 +387,9 @@ u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
u32 *id);
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
@@ -72,6 +72,11 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
return action->type;
}
/* Return the mlx5 core device backing @action's HWS context. */
struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action)
{
return action->ctx->mdev;
}
static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
enum mlx5hws_context_shared_stc_type stc_type,
u8 tbl_type)
@@ -1185,14 +1190,15 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
struct mlx5hws_action_mh_pattern *pattern,
u32 log_bulk_size)
{
u16 num_actions, max_mh_actions = 0, hw_max_actions;
struct mlx5hws_context *ctx = action->ctx;
u16 num_actions, max_mh_actions = 0;
int i, ret, size_in_bytes;
u32 pat_id, arg_id = 0;
__be64 *new_pattern;
size_t pat_max_sz;
pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
hw_max_actions = pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE;
size_in_bytes = pat_max_sz * sizeof(__be64);
new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
if (!new_pattern)
@@ -1202,16 +1208,20 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
for (i = 0; i < num_of_patterns; i++) {
size_t new_num_actions;
size_t cur_num_actions;
u32 nope_location;
u32 nop_locations;
cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
&new_num_actions, &nope_location,
&new_pattern[i * pat_max_sz]);
ret = mlx5hws_pat_calc_nop(pattern[i].data, cur_num_actions,
hw_max_actions, &new_num_actions,
&nop_locations,
&new_pattern[i * pat_max_sz]);
if (ret) {
mlx5hws_err(ctx, "Too many actions after nop insertion\n");
goto free_new_pat;
}
action[i].modify_header.nope_locations = nope_location;
action[i].modify_header.nop_locations = nop_locations;
action[i].modify_header.num_of_actions = new_num_actions;
max_mh_actions = max(max_mh_actions, new_num_actions);
@@ -1258,7 +1268,7 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
MLX5_GET(set_action_in, pattern[i].data, action_type);
} else {
/* Multiple modify actions require a pattern */
if (unlikely(action[i].modify_header.nope_locations)) {
if (unlikely(action[i].modify_header.nop_locations)) {
size_t pattern_sz;
pattern_sz = action[i].modify_header.num_of_actions *
@@ -2100,21 +2110,23 @@ static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
u32 arg_idx,
u8 *arg_data,
u16 num_of_actions,
u32 nope_locations)
u32 nop_locations)
{
u8 *new_arg_data = NULL;
int i, j;
if (unlikely(nope_locations)) {
if (unlikely(nop_locations)) {
new_arg_data = kcalloc(num_of_actions,
MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
if (unlikely(!new_arg_data))
return;
for (i = 0, j = 0; i < num_of_actions; i++, j++) {
memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
if (BIT(i) & nope_locations)
for (i = 0, j = 0; j < num_of_actions; i++, j++) {
if (BIT(i) & nop_locations)
j++;
memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE],
&arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE],
MLX5HWS_MODIFY_ACTION_SIZE);
}
}
@@ -2210,6 +2222,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
struct mlx5hws_action *action;
u32 arg_sz, arg_idx;
u8 *single_action;
u8 max_actions;
__be32 stc_idx;
rule_action = &apply->rule_action[setter->idx_double];
@@ -2237,21 +2250,23 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
*(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
} else {
/* Argument offset multiple with number of args per these actions */
arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
arg_idx = rule_action->modify_header.offset * arg_sz;
return;
}
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
/* Argument offset is multiplied by the number of args for these actions */
max_actions = action->modify_header.max_num_of_actions;
arg_sz = mlx5hws_arg_get_arg_size(max_actions);
arg_idx = rule_action->modify_header.offset * arg_sz;
if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
apply->require_dep = 1;
hws_action_modify_write(apply->queue,
action->modify_header.arg_id + arg_idx,
rule_action->modify_header.data,
action->modify_header.num_of_actions,
action->modify_header.nope_locations);
}
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
apply->require_dep = 1;
hws_action_modify_write(apply->queue,
action->modify_header.arg_id + arg_idx,
rule_action->modify_header.data,
action->modify_header.num_of_actions,
action->modify_header.nop_locations);
}
}
@@ -136,7 +136,7 @@ struct mlx5hws_action {
u32 pat_id;
u32 arg_id;
__be64 single_action;
u32 nope_locations;
u32 nop_locations;
u8 num_of_patterns;
u8 single_action_type;
u8 num_of_actions;
@@ -1081,13 +1081,8 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5hws_bwc_rule *rule;
int err = 0;
if (mlx5_fs_cmd_is_fw_term_table(ft)) {
/* Packet reformat on termination table not supported yet */
if (fte->act_dests.action.action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
return -EOPNOTSUPP;
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
}
err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
if (err)
@@ -1362,7 +1357,7 @@ mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
pkt_reformat->fs_hws_action.pr_data = pr_data;
}
pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
pkt_reformat->fs_hws_action.hws_action = hws_action;
return 0;
@@ -1380,6 +1375,15 @@ static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace
struct mlx5_fs_hws_pr *pr_data;
struct mlx5_fs_pool *pr_pool;
if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
mlx5_fs_cmd_get_fw_cmds()->
packet_reformat_dealloc(ns, &fw_pkt_reformat);
pkt_reformat->fs_hws_action.fw_reformat_id = 0;
}
if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
return;
@@ -1499,6 +1503,7 @@ static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
err = -ENOMEM;
goto release_mh;
}
mutex_init(&modify_hdr->fs_hws_action.lock);
modify_hdr->fs_hws_action.mh_data = mh_data;
modify_hdr->fs_hws_action.fs_pool = pool;
modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
@@ -1532,6 +1537,58 @@ static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *
modify_hdr->fs_hws_action.mh_data = NULL;
}
/* Get a FW-visible packet reformat id for an HWS-owned reformat action.
 *
 * HWS reformat actions have no FW object id of their own, so on first use
 * this lazily allocates a FW-owned packet reformat object with the same
 * type/size/data and caches its id in fs_hws_action.fw_reformat_id; the
 * cache (and its initialization) is serialized by fs_hws_action.lock.
 * Subsequent calls return the cached id. The cached FW object is released
 * in the HWS packet_reformat_dealloc path.
 *
 * Returns 0 and writes the id through @reformat_id, or a negative errno.
 */
int
mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
u32 *reformat_id)
{
enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
struct mlx5_pkt_reformat_params params = { 0 };
struct mlx5_flow_root_namespace *ns;
struct mlx5_core_dev *dev;
int ret;
mutex_lock(lock);
/* Fast path: FW object already allocated by a previous call. */
if (*id != 0) {
*reformat_id = *id;
ret = 0;
goto unlock;
}
dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
if (!dev) {
ret = -EINVAL;
goto unlock;
}
ns = mlx5_get_root_namespace(dev, ns_type);
if (!ns) {
ret = -EINVAL;
goto unlock;
}
/* Mirror the HWS reformat's parameters into a FW-owned object. */
params.type = pkt_reformat->reformat_type;
params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
params.data = pkt_reformat->fs_hws_action.pr_data->data;
ret = mlx5_fs_cmd_get_fw_cmds()->
packet_reformat_alloc(ns, &params, ns_type, &fw_pkt_reformat);
if (ret)
goto unlock;
/* Cache the FW id so later calls skip the allocation. */
*id = fw_pkt_reformat.id;
*reformat_id = *id;
ret = 0;
unlock:
mutex_unlock(lock);
return ret;
}
static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask)
{
@@ -41,6 +41,11 @@ struct mlx5_fs_hws_action {
struct mlx5_fs_pool *fs_pool;
struct mlx5_fs_hws_pr *pr_data;
struct mlx5_fs_hws_mh *mh_data;
u32 fw_reformat_id;
/* Protect `fw_reformat_id` against being initialized from multiple
* threads.
*/
struct mutex lock;
};
struct mlx5_fs_hws_matcher {
@@ -84,12 +89,23 @@ void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
#ifdef CONFIG_MLX5_HW_STEERING
int
mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
u32 *reformat_id);
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
#else
static inline int
mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
u32 *reformat_id)
{
return -EOPNOTSUPP;
}
static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
{
return false;
@@ -503,6 +503,15 @@ int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
enum mlx5hws_action_type
mlx5hws_action_get_type(struct mlx5hws_action *action);
/**
* mlx5hws_action_get_dev - Get mlx5 core device.
*
* @action: The action to get the device from.
*
* Return: mlx5 core device.
*/
struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action);
/**
* mlx5hws_action_create_dest_drop - Create a direct rule drop action.
*
@@ -490,8 +490,8 @@ hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
switch (action_type) {
case MLX5_ACTION_TYPE_SET:
case MLX5_ACTION_TYPE_ADD:
*src_field = MLX5_GET(set_action_in, pattern, field);
*dst_field = INVALID_FIELD;
*src_field = INVALID_FIELD;
*dst_field = MLX5_GET(set_action_in, pattern, field);
break;
case MLX5_ACTION_TYPE_COPY:
*src_field = MLX5_GET(copy_action_in, pattern, src_field);
@@ -522,57 +522,59 @@ bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], s
return true;
}
void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
size_t max_actions, size_t *new_size,
u32 *nope_location, __be64 *new_pat)
int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
size_t max_actions, size_t *new_size,
u32 *nop_locations, __be64 *new_pat)
{
u16 prev_src_field = 0, prev_dst_field = 0;
u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
u16 src_field, dst_field;
u8 action_type;
bool dependent;
size_t i, j;
*new_size = num_actions;
*nope_location = 0;
*nop_locations = 0;
if (num_actions == 1)
return;
return 0;
for (i = 0, j = 0; i < num_actions; i++, j++) {
action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
if (j >= max_actions)
return -EINVAL;
action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
hws_action_modify_get_target_fields(action_type, &pattern[i],
&src_field, &dst_field);
if (i % 2) {
if (action_type == MLX5_ACTION_TYPE_COPY &&
(prev_src_field == src_field ||
prev_dst_field == dst_field)) {
/* need Nope */
*new_size += 1;
*nope_location |= BIT(i);
memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
MLX5_SET(set_action_in, &new_pat[j],
action_type,
MLX5_MODIFICATION_TYPE_NOP);
j++;
} else if (prev_src_field == src_field) {
/* need Nope */
*new_size += 1;
*nope_location |= BIT(i);
MLX5_SET(set_action_in, &new_pat[j],
action_type,
MLX5_MODIFICATION_TYPE_NOP);
j++;
}
}
memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
/* check if no more space */
if (j > max_actions) {
*new_size = num_actions;
*nope_location = 0;
return;
/* For every action, look at it and the previous one. The two
* actions are dependent if:
*/
dependent =
(i > 0) &&
/* At least one of the actions is a write and */
(dst_field != INVALID_FIELD ||
prev_dst_field != INVALID_FIELD) &&
/* One reads from the other's source */
(dst_field == prev_src_field ||
src_field == prev_dst_field ||
/* Or both write to the same destination */
dst_field == prev_dst_field);
if (dependent) {
*new_size += 1;
*nop_locations |= BIT(i);
memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
MLX5_SET(set_action_in, &new_pat[j], action_type,
MLX5_MODIFICATION_TYPE_NOP);
j++;
if (j >= max_actions)
return -EINVAL;
}
memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
prev_src_field = src_field;
prev_dst_field = dst_field;
}
return 0;
}
@@ -96,6 +96,7 @@ int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
u8 *arg_data,
size_t data_size);
void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
size_t *new_size, u32 *nope_location, __be64 *new_pat);
int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
size_t max_actions, size_t *new_size,
u32 *nop_locations, __be64 *new_pat);
#endif /* MLX5HWS_PAT_ARG_H_ */
@@ -833,15 +833,21 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
return steering_caps;
}
int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
int
mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
u32 *reformat_id)
{
struct mlx5dr_action *dr_action;
switch (pkt_reformat->reformat_type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
case MLX5_REFORMAT_TYPE_INSERT_HDR:
return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action);
dr_action = pkt_reformat->fs_dr_action.dr_action;
*reformat_id = mlx5dr_action_get_pkt_reformat_id(dr_action);
return 0;
}
return -EOPNOTSUPP;
}
@@ -38,7 +38,9 @@ struct mlx5_fs_dr_table {
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
int
mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
u32 *reformat_id);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
@@ -49,9 +51,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
return NULL;
}
static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
static inline int
mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
u32 *reformat_id)
{
return 0;
return -EOPNOTSUPP;
}
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)