/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

33#ifndef __MLX5_ESWITCH_H__
34#define __MLX5_ESWITCH_H__
35
36#include <linux/if_ether.h>
37#include <linux/if_link.h>
38#include <linux/atomic.h>
39#include <linux/xarray.h>
40#include <net/devlink.h>
41#include <linux/mlx5/device.h>
42#include <linux/mlx5/eswitch.h>
43#include <linux/mlx5/vport.h>
44#include <linux/mlx5/fs.h>
45#include "lib/mpfs.h"
46#include "lib/fs_chains.h"
47#include "sf/sf.h"
48#include "en/tc_ct.h"
49#include "en/tc/sample.h"
50
/* Kind of object a reg_c0 mapping id resolves back to on FDB miss. */
enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
};
55
/* Value stored in the reg_c0 object pool: either the chain a packet
 * missed from, or the sampler parameters for a sampled packet.
 * The union arm in use is selected by @type.
 */
struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain; /* valid for MLX5_MAPPED_OBJ_CHAIN */
		struct { /* valid for MLX5_MAPPED_OBJ_SAMPLE */
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
	};
};
68
69#ifdef CONFIG_MLX5_ESWITCH
70
/* Default number of large flow groups in the offloads FDB fast path. */
#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

/* Max unicast/multicast L2 addresses a vport may program, per HCA caps. */
#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

/* True when the device FDB supports multi-path forwarding tables. */
#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

/* Shorthand for the offloads-mode fs_chains private context. */
#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

/* Mapping context types registered against the reg_c0 object pool. */
enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
};
92
/* Per-vport ingress ACL table state; legacy and offloads sub-structs
 * belong to the respective eswitch modes.
 */
struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};
117
/* Per-vport egress ACL table state. The legacy and offloads arms are
 * mutually exclusive (one eswitch mode active at a time), hence the union.
 */
struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct mlx5_flow_handle *bounce_rule;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};
136
/* Accumulated ACL drop counters reported per vport. */
struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};
141
/* Administrative configuration of a vport as set by the eswitch manager
 * (ndo_set_vf_* style settings).
 */
struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
};
151
/* Vport context events. Bit positions follow the device event encoding,
 * hence the gap at BIT(2).
 */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
158
/* Runtime state of a single eswitch vport (PF, VF, SF, ECPF or uplink). */
struct mlx5_vport {
	struct mlx5_core_dev *dev;
	/* L2 address hash lists driving legacy-mode FDB programming */
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	/* queued on esw->work_queue to process vport context change events */
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	/* source-port match metadata: default vs. currently active value */
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	/* QoS (TSAR rate-limiting) state for this vport */
	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;	/* vport number */
	bool enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct devlink_port *dl_port;
};
189
struct mlx5_esw_indir_table;

/* FDB tables/groups for the active eswitch mode; legacy and offloads
 * are mutually exclusive, hence the union.
 */
struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			/* VEPA mode: hairpin traffic through the uplink */
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			/* per-vport forwarding tables, keyed in the hashtable;
			 * lock protects table lookup/insert/remove
			 */
			struct {
				DECLARE_HASHTABLE(table, 8);
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};
231
/* Offloads-mode global state: restore table, representors, encap/decap
 * and mod-hdr caches, and the reg_c0 mapping pool.
 */
struct mlx5_esw_offload {
	/* table + group restoring chain/tunnel context from reg_c0 on miss */
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct xarray vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map; /* vhca_id -> vport_num lookup */
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};
258
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};
265
/* Deferred work item carrying its owning eswitch for host/function events. */
struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};
270
/* Notifier block and cached VF count for ESW_FUNCTIONS_CHANGED events. */
struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
};
275
/* Flags for mlx5_eswitch::flags. */
enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};
280
struct mlx5_esw_bridge_offloads;

/* Top-level E-Switch instance state, one per mlx5 core device. */
struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data path */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	/* global (root TSAR) QoS state */
	struct {
		bool enabled;
		u32 root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct blocking_notifier_head n_head;
	struct lock_class_key mode_lock_key;
};
326
/* Offloads-mode setup/teardown and representor bookkeeping. */
void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

/* Source-port match metadata (reg_c0) management. */
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);
375
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

/* Termination table helpers (needed e.g. for hairpin/uplink loopback). */
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

/* Offloaded flow rule add/delete in the FDB. */
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);
422
/* VLAN action flags for __mlx5_eswitch_set_vport_vlan(). */
enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

/* Match levels map 1:1 onto the device inline modes. */
enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

/* Per-destination flags (mlx5_esw_flow_attr::dests[].flags). */
enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};

/* Per-flow attribute flags. */
enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
	MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
	MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
};
451
/* E-Switch-specific part of a TC flow's attributes: ingress rep,
 * VLAN push/pop config and per-destination forwarding details.
 */
struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;

	/* dests[0..split_count-1] go to the extra (pre-mirror) table */
	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};
475
/* devlink eswitch mode/inline-mode/encap callbacks. */
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

/* VLAN push/pop action handling for offloaded flows. */
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);
502
503static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
504{
505 return esw->qos.enabled;
506}
507
508static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
509 u8 vlan_depth)
510{
511 bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
512 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
513
514 if (vlan_depth == 1)
515 return ret;
516
517 return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
518 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
519}
520
/* LAG/multipath eligibility checks between two devices. */
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

/* Query the device's esw functions capability (caller kvfree()s result). */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
538
539static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
540{
541 return esw && MLX5_ESWITCH_MANAGER(esw->dev);
542}
543
544
545static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
546{
547 return mlx5_core_is_ecpf_esw_manager(dev) ?
548 MLX5_VPORT_ECPF : MLX5_VPORT_PF;
549}
550
551static inline bool
552mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
553{
554 return esw->manager_vport == vport_num;
555}
556
557static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
558{
559 return mlx5_core_is_ecpf_esw_manager(dev) ?
560 MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
561}
562
563static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
564{
565 return mlx5_core_is_ecpf_esw_manager(dev);
566}
567
568static inline unsigned int
569mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
570 u16 vport_num)
571{
572 return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
573}
574
575static inline u16
576mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
577{
578 return dl_port_index & 0xffff;
579}
580
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
613
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

/* Vport type predicates. */
bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

/* Enable/disable the PF and host VF vports with the requested events. */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

/* Sizing/flags template for a per-vport forwarding table namespace. */
struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

/* Key for looking up/creating a per-vport forwarding table. */
struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	const struct esw_vport_tbl_namespace *vport_ns;
};

/* Refcounted get/put of a per-vport forwarding table. */
struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

/* Representor load/unload per vport. */
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

/* devlink port registration for regular and SF ports. */
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

/* vhca_id <-> vport number mapping maintenance. */
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
690
/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);

/* Mode-lock helpers guarding eswitch mode changes against concurrent users. */
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void mlx5_esw_lock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

/* Shared (single) FDB setup between a master and a slave eswitch. */
int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw);
void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
720
#else  /* CONFIG_MLX5_ESWITCH */

/* eswitch API stubs: no-op implementations when the E-Switch is
 * compiled out, so callers need no #ifdefs.
 */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; }
static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; }

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					struct mlx5_eswitch *slave_esw)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */
771