1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies */
3
4 #include "mlx5_core.h"
5 #include "fs_core.h"
6 #include "fs_cmd.h"
7 #include "mlx5dr.h"
8 #include "fs_dr.h"
9
mlx5_dr_is_fw_table(u32 flags)10 static bool mlx5_dr_is_fw_table(u32 flags)
11 {
12 if (flags & MLX5_FLOW_TABLE_TERMINATION)
13 return true;
14
15 return false;
16 }
17
/* Root flow-table updates are always delegated to the FW command set. */
static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 underlay_qpn,
				      bool disconnect)
{
	const struct mlx5_flow_cmds *fw_cmds = mlx5_fs_cmd_get_fw_cmds();

	return fw_cmds->update_root_ft(ns, ft, underlay_qpn, disconnect);
}
26
set_miss_action(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_table * next_ft)27 static int set_miss_action(struct mlx5_flow_root_namespace *ns,
28 struct mlx5_flow_table *ft,
29 struct mlx5_flow_table *next_ft)
30 {
31 struct mlx5dr_action *old_miss_action;
32 struct mlx5dr_action *action = NULL;
33 struct mlx5dr_table *next_tbl;
34 int err;
35
36 next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
37 if (next_tbl) {
38 action = mlx5dr_action_create_dest_table(next_tbl);
39 if (!action)
40 return -EINVAL;
41 }
42 old_miss_action = ft->fs_dr_table.miss_action;
43 err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
44 if (err && action) {
45 err = mlx5dr_action_destroy(action);
46 if (err)
47 mlx5_core_err(ns->dev,
48 "Failed to destroy action (%d)\n", err);
49 action = NULL;
50 }
51 ft->fs_dr_table.miss_action = action;
52 if (old_miss_action) {
53 err = mlx5dr_action_destroy(old_miss_action);
54 if (err)
55 mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
56 err);
57 }
58
59 return err;
60 }
61
mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,unsigned int log_size,struct mlx5_flow_table * next_ft)62 static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
63 struct mlx5_flow_table *ft,
64 unsigned int log_size,
65 struct mlx5_flow_table *next_ft)
66 {
67 struct mlx5dr_table *tbl;
68 u32 flags;
69 int err;
70
71 if (mlx5_dr_is_fw_table(ft->flags))
72 return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
73 log_size,
74 next_ft);
75 flags = ft->flags;
76 /* turn off encap/decap if not supported for sw-str by fw */
77 if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported))
78 flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
79 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
80
81 tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
82 if (!tbl) {
83 mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
84 return -EINVAL;
85 }
86
87 ft->fs_dr_table.dr_table = tbl;
88 ft->id = mlx5dr_table_get_id(tbl);
89
90 if (next_ft) {
91 err = set_miss_action(ns, ft, next_ft);
92 if (err) {
93 mlx5dr_table_destroy(tbl);
94 ft->fs_dr_table.dr_table = NULL;
95 return err;
96 }
97 }
98
99 return 0;
100 }
101
mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft)102 static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
103 struct mlx5_flow_table *ft)
104 {
105 struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
106 int err;
107
108 if (mlx5_dr_is_fw_table(ft->flags))
109 return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
110
111 err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
112 if (err) {
113 mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
114 err);
115 return err;
116 }
117 if (action) {
118 err = mlx5dr_action_destroy(action);
119 if (err) {
120 mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
121 err);
122 return err;
123 }
124 }
125
126 return err;
127 }
128
/* Table "modify" in SW steering only re-targets the miss path to @next_ft. */
static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 struct mlx5_flow_table *next_ft)
{
	return set_miss_action(ns, ft, next_ft);
}
135
/* Create a DR matcher for the flow group described by the FW-format
 * command buffer @in.  The group's start_flow_index doubles as the
 * matcher priority, preserving fs_core's group ordering.
 */
static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
					 struct mlx5_flow_table *ft,
					 u32 *in,
					 struct mlx5_flow_group *fg)
{
	struct mlx5dr_match_parameters mask;
	struct mlx5dr_matcher *matcher;
	u8 match_criteria_enable;
	u32 priority;

	if (mlx5_dr_is_fw_table(ft->flags))
		return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
								    fg);

	priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
	match_criteria_enable = MLX5_GET(create_flow_group_in, in,
					 match_criteria_enable);
	mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in,
				      match_criteria);
	mask.match_sz = sizeof(fg->mask.match_criteria);

	matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
					priority,
					match_criteria_enable,
					&mask);
	if (!matcher) {
		mlx5_core_err(ns->dev, "Failed creating matcher\n");
		return -EINVAL;
	}

	fg->fs_dr_matcher.dr_matcher = matcher;
	return 0;
}
169
mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * fg)170 static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
171 struct mlx5_flow_table *ft,
172 struct mlx5_flow_group *fg)
173 {
174 if (mlx5_dr_is_fw_table(ft->flags))
175 return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
176
177 return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
178 }
179
create_vport_action(struct mlx5dr_domain * domain,struct mlx5_flow_rule * dst)180 static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
181 struct mlx5_flow_rule *dst)
182 {
183 struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
184
185 return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
186 dest_attr->vport.flags &
187 MLX5_FLOW_DEST_VPORT_VHCA_ID,
188 dest_attr->vport.vhca_id);
189 }
190
create_ft_action(struct mlx5dr_domain * domain,struct mlx5_flow_rule * dst)191 static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
192 struct mlx5_flow_rule *dst)
193 {
194 struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
195
196 if (mlx5_dr_is_fw_table(dest_ft->flags))
197 return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
198 return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
199 }
200
create_action_push_vlan(struct mlx5dr_domain * domain,struct mlx5_fs_vlan * vlan)201 static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
202 struct mlx5_fs_vlan *vlan)
203 {
204 u16 n_ethtype = vlan->ethtype;
205 u8 prio = vlan->prio;
206 u16 vid = vlan->vid;
207 u32 vlan_hdr;
208
209 vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
210 return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
211 }
212
contain_vport_reformat_action(struct mlx5_flow_rule * dst)213 static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
214 {
215 return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
216 dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
217 }
218
219 #define MLX5_FLOW_CONTEXT_ACTION_MAX 20
mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct mlx5_flow_group * group,struct fs_fte * fte)220 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
221 struct mlx5_flow_table *ft,
222 struct mlx5_flow_group *group,
223 struct fs_fte *fte)
224 {
225 struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
226 struct mlx5dr_action_dest *term_actions;
227 struct mlx5dr_match_parameters params;
228 struct mlx5_core_dev *dev = ns->dev;
229 struct mlx5dr_action **fs_dr_actions;
230 struct mlx5dr_action *tmp_action;
231 struct mlx5dr_action **actions;
232 bool delay_encap_set = false;
233 struct mlx5dr_rule *rule;
234 struct mlx5_flow_rule *dst;
235 int fs_dr_num_actions = 0;
236 int num_term_actions = 0;
237 int num_actions = 0;
238 size_t match_sz;
239 int err = 0;
240 int i;
241
242 if (mlx5_dr_is_fw_table(ft->flags))
243 return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
244
245 actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
246 GFP_KERNEL);
247 if (!actions) {
248 err = -ENOMEM;
249 goto out_err;
250 }
251
252 fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
253 sizeof(*fs_dr_actions), GFP_KERNEL);
254 if (!fs_dr_actions) {
255 err = -ENOMEM;
256 goto free_actions_alloc;
257 }
258
259 term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
260 sizeof(*term_actions), GFP_KERNEL);
261 if (!term_actions) {
262 err = -ENOMEM;
263 goto free_fs_dr_actions_alloc;
264 }
265
266 match_sz = sizeof(fte->val);
267
268 /* Drop reformat action bit if destination vport set with reformat */
269 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
270 list_for_each_entry(dst, &fte->node.children, node.list) {
271 if (!contain_vport_reformat_action(dst))
272 continue;
273
274 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
275 break;
276 }
277 }
278
279 /* The order of the actions are must to be keep, only the following
280 * order is supported by SW steering:
281 * TX: modify header -> push vlan -> encap
282 * RX: decap -> pop vlan -> modify header
283 */
284 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
285 enum mlx5dr_action_reformat_type decap_type =
286 DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
287
288 tmp_action = mlx5dr_action_create_packet_reformat(domain,
289 decap_type, 0,
290 NULL);
291 if (!tmp_action) {
292 err = -ENOMEM;
293 goto free_actions;
294 }
295 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
296 actions[num_actions++] = tmp_action;
297 }
298
299 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
300 bool is_decap = fte->action.pkt_reformat->reformat_type ==
301 MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
302
303 if (is_decap)
304 actions[num_actions++] =
305 fte->action.pkt_reformat->action.dr_action;
306 else
307 delay_encap_set = true;
308 }
309
310 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
311 tmp_action =
312 mlx5dr_action_create_pop_vlan();
313 if (!tmp_action) {
314 err = -ENOMEM;
315 goto free_actions;
316 }
317 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
318 actions[num_actions++] = tmp_action;
319 }
320
321 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
322 tmp_action =
323 mlx5dr_action_create_pop_vlan();
324 if (!tmp_action) {
325 err = -ENOMEM;
326 goto free_actions;
327 }
328 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
329 actions[num_actions++] = tmp_action;
330 }
331
332 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
333 actions[num_actions++] =
334 fte->action.modify_hdr->action.dr_action;
335
336 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
337 tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
338 if (!tmp_action) {
339 err = -ENOMEM;
340 goto free_actions;
341 }
342 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
343 actions[num_actions++] = tmp_action;
344 }
345
346 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
347 tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
348 if (!tmp_action) {
349 err = -ENOMEM;
350 goto free_actions;
351 }
352 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
353 actions[num_actions++] = tmp_action;
354 }
355
356 if (delay_encap_set)
357 actions[num_actions++] =
358 fte->action.pkt_reformat->action.dr_action;
359
360 /* The order of the actions below is not important */
361
362 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
363 tmp_action = mlx5dr_action_create_drop();
364 if (!tmp_action) {
365 err = -ENOMEM;
366 goto free_actions;
367 }
368 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
369 term_actions[num_term_actions++].dest = tmp_action;
370 }
371
372 if (fte->flow_context.flow_tag) {
373 tmp_action =
374 mlx5dr_action_create_tag(fte->flow_context.flow_tag);
375 if (!tmp_action) {
376 err = -ENOMEM;
377 goto free_actions;
378 }
379 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
380 actions[num_actions++] = tmp_action;
381 }
382
383 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
384 list_for_each_entry(dst, &fte->node.children, node.list) {
385 enum mlx5_flow_destination_type type = dst->dest_attr.type;
386 u32 ft_id;
387
388 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
389 num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
390 err = -ENOSPC;
391 goto free_actions;
392 }
393
394 if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
395 continue;
396
397 switch (type) {
398 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
399 tmp_action = create_ft_action(domain, dst);
400 if (!tmp_action) {
401 err = -ENOMEM;
402 goto free_actions;
403 }
404 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
405 term_actions[num_term_actions++].dest = tmp_action;
406 break;
407 case MLX5_FLOW_DESTINATION_TYPE_VPORT:
408 tmp_action = create_vport_action(domain, dst);
409 if (!tmp_action) {
410 err = -ENOMEM;
411 goto free_actions;
412 }
413 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
414 term_actions[num_term_actions].dest = tmp_action;
415
416 if (dst->dest_attr.vport.flags &
417 MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
418 term_actions[num_term_actions].reformat =
419 dst->dest_attr.vport.pkt_reformat->action.dr_action;
420
421 num_term_actions++;
422 break;
423 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
424 ft_id = dst->dest_attr.ft_num;
425 tmp_action = mlx5dr_action_create_dest_table_num(domain,
426 ft_id);
427 if (!tmp_action) {
428 err = -ENOMEM;
429 goto free_actions;
430 }
431 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
432 term_actions[num_term_actions++].dest = tmp_action;
433 break;
434 default:
435 err = -EOPNOTSUPP;
436 goto free_actions;
437 }
438 }
439 }
440
441 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
442 list_for_each_entry(dst, &fte->node.children, node.list) {
443 u32 id;
444
445 if (dst->dest_attr.type !=
446 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
447 continue;
448
449 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
450 err = -ENOSPC;
451 goto free_actions;
452 }
453
454 id = dst->dest_attr.counter_id;
455 tmp_action =
456 mlx5dr_action_create_flow_counter(id);
457 if (!tmp_action) {
458 err = -ENOMEM;
459 goto free_actions;
460 }
461
462 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
463 actions[num_actions++] = tmp_action;
464 }
465 }
466
467 params.match_sz = match_sz;
468 params.match_buf = (u64 *)fte->val;
469 if (num_term_actions == 1) {
470 if (term_actions->reformat)
471 actions[num_actions++] = term_actions->reformat;
472
473 actions[num_actions++] = term_actions->dest;
474 } else if (num_term_actions > 1) {
475 tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
476 term_actions,
477 num_term_actions);
478 if (!tmp_action) {
479 err = -EOPNOTSUPP;
480 goto free_actions;
481 }
482 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
483 actions[num_actions++] = tmp_action;
484 }
485
486 rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
487 ¶ms,
488 num_actions,
489 actions,
490 fte->flow_context.flow_source);
491 if (!rule) {
492 err = -EINVAL;
493 goto free_actions;
494 }
495
496 kfree(term_actions);
497 kfree(actions);
498
499 fte->fs_dr_rule.dr_rule = rule;
500 fte->fs_dr_rule.num_actions = fs_dr_num_actions;
501 fte->fs_dr_rule.dr_actions = fs_dr_actions;
502
503 return 0;
504
505 free_actions:
506 /* Free in reverse order to handle action dependencies */
507 for (i = fs_dr_num_actions - 1; i >= 0; i--)
508 if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
509 mlx5dr_action_destroy(fs_dr_actions[i]);
510
511 kfree(term_actions);
512 free_fs_dr_actions_alloc:
513 kfree(fs_dr_actions);
514 free_actions_alloc:
515 kfree(actions);
516 out_err:
517 mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
518 return err;
519 }
520
/* Allocate a packet-reformat (encap/decap) DR action, mapping the FW
 * reformat type to its DR equivalent.  The resulting action is stored in
 * @pkt_reformat for later use by FTE creation.
 */
static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					     int reformat_type,
					     size_t size,
					     void *reformat_data,
					     enum mlx5_flow_namespace_type namespace,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action *dr_action;
	int dr_reformat;

	switch (reformat_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
	case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
	case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
		break;
	case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
		break;
	case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
		break;
	default:
		mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
			      reformat_type);
		return -EOPNOTSUPP;
	}

	dr_action = mlx5dr_action_create_packet_reformat(dr_domain,
							 dr_reformat,
							 size,
							 reformat_data);
	if (!dr_action) {
		mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
		return -EINVAL;
	}

	pkt_reformat->action.dr_action = dr_action;

	return 0;
}
563
mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_pkt_reformat * pkt_reformat)564 static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
565 struct mlx5_pkt_reformat *pkt_reformat)
566 {
567 mlx5dr_action_destroy(pkt_reformat->action.dr_action);
568 }
569
/* Allocate a modify-header DR action from @num_actions FW-format
 * set/add/copy entries in @modify_actions; store it in @modify_hdr.
 */
static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					   u8 namespace, u8 num_actions,
					   void *modify_actions,
					   struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
	struct mlx5dr_action *dr_action;
	size_t actions_sz;

	/* Each entry is one set_add_copy_action_in union in FW layout. */
	actions_sz = (size_t)num_actions *
		     MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
	dr_action = mlx5dr_action_create_modify_header(dr_domain, 0,
						       actions_sz,
						       modify_actions);
	if (!dr_action) {
		mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
		return -EINVAL;
	}

	modify_hdr->action.dr_action = dr_action;

	return 0;
}
593
mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace * ns,struct mlx5_modify_hdr * modify_hdr)594 static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
595 struct mlx5_modify_hdr *modify_hdr)
596 {
597 mlx5dr_action_destroy(modify_hdr->action.dr_action);
598 }
599
/* In-place FTE modification is not implemented for SW steering; callers
 * must handle -EOPNOTSUPP (all parameters are intentionally unused).
 */
static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *group,
				  int modify_mask,
				  struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}
608
mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_table * ft,struct fs_fte * fte)609 static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
610 struct mlx5_flow_table *ft,
611 struct fs_fte *fte)
612 {
613 struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
614 int err;
615 int i;
616
617 if (mlx5_dr_is_fw_table(ft->flags))
618 return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
619
620 err = mlx5dr_rule_destroy(rule->dr_rule);
621 if (err)
622 return err;
623
624 /* Free in reverse order to handle action dependencies */
625 for (i = rule->num_actions - 1; i >= 0; i--)
626 if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
627 mlx5dr_action_destroy(rule->dr_actions[i]);
628
629 kfree(rule->dr_actions);
630 return 0;
631 }
632
mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace * ns,struct mlx5_flow_root_namespace * peer_ns)633 static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
634 struct mlx5_flow_root_namespace *peer_ns)
635 {
636 struct mlx5dr_domain *peer_domain = NULL;
637
638 if (peer_ns)
639 peer_domain = peer_ns->fs_dr_domain.dr_domain;
640 mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
641 peer_domain);
642 return 0;
643 }
644
mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace * ns)645 static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
646 {
647 ns->fs_dr_domain.dr_domain =
648 mlx5dr_domain_create(ns->dev,
649 MLX5DR_DOMAIN_TYPE_FDB);
650 if (!ns->fs_dr_domain.dr_domain) {
651 mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
652 return -EOPNOTSUPP;
653 }
654 return 0;
655 }
656
mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace * ns)657 static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
658 {
659 return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
660 }
661
mlx5_fs_dr_is_supported(struct mlx5_core_dev * dev)662 bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
663 {
664 return mlx5dr_is_supported(dev);
665 }
666
/* fs_core command vtable routed through SW steering.  FW-only operations
 * (update_root_ft, and the FW-table fallbacks inside each handler) still
 * go through mlx5_fs_cmd_get_fw_cmds().
 */
static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
	.create_flow_table = mlx5_cmd_dr_create_flow_table,
	.destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_dr_modify_flow_table,
	.create_flow_group = mlx5_cmd_dr_create_flow_group,
	.destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
	.create_fte = mlx5_cmd_dr_create_fte,
	.update_fte = mlx5_cmd_dr_update_fte,
	.delete_fte = mlx5_cmd_dr_delete_fte,
	.update_root_ft = mlx5_cmd_dr_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
	.set_peer = mlx5_cmd_dr_set_peer,
	.create_ns = mlx5_cmd_dr_create_ns,
	.destroy_ns = mlx5_cmd_dr_destroy_ns,
};
685
mlx5_fs_cmd_get_dr_cmds(void)686 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
687 {
688 return &mlx5_flow_cmds_dr;
689 }
690