1
2
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/types.h>
7#include <linux/pci.h>
8#include <linux/netdevice.h>
9#include <linux/etherdevice.h>
10#include <linux/ethtool.h>
11#include <linux/slab.h>
12#include <linux/device.h>
13#include <linux/skbuff.h>
14#include <linux/if_vlan.h>
15#include <linux/if_bridge.h>
16#include <linux/workqueue.h>
17#include <linux/jiffies.h>
18#include <linux/bitops.h>
19#include <linux/list.h>
20#include <linux/notifier.h>
21#include <linux/dcbnl.h>
22#include <linux/inetdevice.h>
23#include <linux/netlink.h>
24#include <linux/jhash.h>
25#include <linux/log2.h>
26#include <net/switchdev.h>
27#include <net/pkt_cls.h>
28#include <net/netevent.h>
29#include <net/addrconf.h>
30
31#include "spectrum.h"
32#include "pci.h"
33#include "core.h"
34#include "core_env.h"
35#include "reg.h"
36#include "port.h"
37#include "trap.h"
38#include "txheader.h"
39#include "spectrum_cnt.h"
40#include "spectrum_dpipe.h"
41#include "spectrum_acl_flex_actions.h"
42#include "spectrum_span.h"
43#include "spectrum_ptp.h"
44#include "spectrum_trap.h"
45
/* Minimum supported firmware version per ASIC generation. The bundled
 * firmware file name below is composed from the very same numbers, so
 * the shipped .mfa2 must match these exactly.
 */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 2018
/* Oldest minor revision from which a device reset is possible —
 * presumably consulted by the (out-of-view) FW flashing path; confirm
 * at the consumer of .can_reset_minor.
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Expands to "mellanox/mlxsw_spectrum-13.2008.2018.mfa2". */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

/* Spectrum-2: same scheme as above, without a can_reset_minor. */
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 2018

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

/* Spectrum-3: same scheme as above. */
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 2018

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

/* Masks of the significant bits of the device base MAC per ASIC
 * generation; the unmasked low bits are presumably left free for
 * per-port addressing — confirm at the (out-of-view) consumers.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
103
/* Tx header field accessors. Each MLXSW_ITEM32(tx, hdr, <name>,
 * <byte offset>, <bit shift>, <bit width>) invocation generates
 * mlxsw_tx_hdr_<name>_set()/_get() helpers for the given field of the
 * Tx header built by mlxsw_sp_txhdr_construct() below.
 */

/* tx_hdr_version
 * Tx header version. Set to MLXSW_TXHDR_VERSION_1 by the Tx path.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type. Set to MLXSW_TXHDR_ETH_CTL by the Tx path.
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Set to MLXSW_TXHDR_PROTO_ETH by the Tx path.
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Left at 0 by this file's Tx path; exact semantics not visible here.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates whether the 'fid' field below is valid. Left at 0 by this
 * file's Tx path.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Set to 0 by the Tx path.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Use the control traffic class; set to 1 by the Tx path.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress traffic class. Left at 0 by this file's Tx path.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port (or multicast ID, per the name). Set to the
 * egress local port number by the Tx path.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Filtering ID, meaningful only when fid_valid is set. Left at 0 by
 * this file's Tx path.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * Packet type. Set to MLXSW_TXHDR_TYPE_CONTROL by the Tx path.
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
171
172int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
173 unsigned int counter_index, u64 *packets,
174 u64 *bytes)
175{
176 char mgpc_pl[MLXSW_REG_MGPC_LEN];
177 int err;
178
179 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
180 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
181 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
182 if (err)
183 return err;
184 if (packets)
185 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
186 if (bytes)
187 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
188 return 0;
189}
190
191static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
192 unsigned int counter_index)
193{
194 char mgpc_pl[MLXSW_REG_MGPC_LEN];
195
196 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
197 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
198 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
199}
200
201int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
202 unsigned int *p_counter_index)
203{
204 int err;
205
206 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
207 p_counter_index);
208 if (err)
209 return err;
210 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
211 if (err)
212 goto err_counter_clear;
213 return 0;
214
215err_counter_clear:
216 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
217 *p_counter_index);
218 return err;
219}
220
/* Return a flow counter previously obtained with
 * mlxsw_sp_flow_counter_alloc() to the flow sub-pool.
 */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
227
/* Prepend and fill the Tx header on @skb so the packet is sent as a
 * control packet out of tx_info->local_port. The caller must have made
 * MLXSW_TXHDR_LEN bytes of headroom available (see skb_cow_head() in
 * mlxsw_sp_port_xmit()).
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Start from an all-zero header; fields not set below stay 0. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
243
244enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
245{
246 switch (state) {
247 case BR_STATE_FORWARDING:
248 return MLXSW_REG_SPMS_STATE_FORWARDING;
249 case BR_STATE_LEARNING:
250 return MLXSW_REG_SPMS_STATE_LEARNING;
251 case BR_STATE_LISTENING:
252 case BR_STATE_DISABLED:
253 case BR_STATE_BLOCKING:
254 return MLXSW_REG_SPMS_STATE_DISCARDING;
255 default:
256 BUG();
257 }
258}
259
260int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
261 u8 state)
262{
263 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
264 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
265 char *spms_pl;
266 int err;
267
268 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
269 if (!spms_pl)
270 return -ENOMEM;
271 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
272 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
273
274 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
275 kfree(spms_pl);
276 return err;
277}
278
279static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
280{
281 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
282 int err;
283
284 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
285 if (err)
286 return err;
287 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
288 return 0;
289}
290
291int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
292 bool is_up)
293{
294 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
295 char paos_pl[MLXSW_REG_PAOS_LEN];
296
297 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
298 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
299 MLXSW_PORT_ADMIN_STATUS_DOWN);
300 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
301}
302
303static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
304 unsigned char *addr)
305{
306 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
307 char ppad_pl[MLXSW_REG_PPAD_LEN];
308
309 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
310 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
311 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
312}
313
/* Derive the port's unicast MAC from the switch base MAC by adding the
 * local port number to the last byte, then program it to the hardware.
 * NOTE(review): the addition can wrap within the last byte for high
 * port numbers — presumably the base MAC leaves enough room (see the
 * mac_mask tables above); confirm.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
323
324static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
325{
326 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
327 char pmtu_pl[MLXSW_REG_PMTU_LEN];
328 int err;
329
330 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
331 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
332 if (err)
333 return err;
334
335 *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
336 return 0;
337}
338
/* Program the port MTU. The hardware MTU covers the Tx header and the
 * Ethernet header on top of the payload, so those are added before
 * validating against the cached per-port maximum.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
351
352static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
353{
354 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
355 char pspa_pl[MLXSW_REG_PSPA_LEN];
356
357 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
358 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
359}
360
361int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
362{
363 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
364 char svpe_pl[MLXSW_REG_SVPE_LEN];
365
366 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
367 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
368}
369
370int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
371 bool learn_enable)
372{
373 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
374 char *spvmlr_pl;
375 int err;
376
377 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
378 if (!spvmlr_pl)
379 return -ENOMEM;
380 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
381 learn_enable);
382 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
383 kfree(spvmlr_pl);
384 return err;
385}
386
387int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
388{
389 switch (ethtype) {
390 case ETH_P_8021Q:
391 *p_sver_type = 0;
392 break;
393 case ETH_P_8021AD:
394 *p_sver_type = 1;
395 break;
396 default:
397 return -EINVAL;
398 }
399
400 return 0;
401}
402
403static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
404 u16 vid, u16 ethtype)
405{
406 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
407 char spvid_pl[MLXSW_REG_SPVID_LEN];
408 u8 sver_type;
409 int err;
410
411 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
412 if (err)
413 return err;
414
415 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
416 sver_type);
417
418 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
419}
420
421static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
422 bool allow)
423{
424 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
425 char spaft_pl[MLXSW_REG_SPAFT_LEN];
426
427 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
428 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
429}
430
431int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
432 u16 ethtype)
433{
434 int err;
435
436 if (!vid) {
437 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
438 if (err)
439 return err;
440 } else {
441 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
442 if (err)
443 return err;
444 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
445 if (err)
446 goto err_port_allow_untagged_set;
447 }
448
449 mlxsw_sp_port->pvid = vid;
450 return 0;
451
452err_port_allow_untagged_set:
453 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
454 return err;
455}
456
457static int
458mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
459{
460 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
461 char sspr_pl[MLXSW_REG_SSPR_LEN];
462
463 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
464 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
465}
466
467static int
468mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
469 struct mlxsw_sp_port_mapping *port_mapping)
470{
471 char pmlp_pl[MLXSW_REG_PMLP_LEN];
472 bool separate_rxtx;
473 u8 module;
474 u8 width;
475 int err;
476 int i;
477
478 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
479 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
480 if (err)
481 return err;
482 module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
483 width = mlxsw_reg_pmlp_width_get(pmlp_pl);
484 separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
485
486 if (width && !is_power_of_2(width)) {
487 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
488 local_port);
489 return -EINVAL;
490 }
491
492 for (i = 0; i < width; i++) {
493 if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
494 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
495 local_port);
496 return -EINVAL;
497 }
498 if (separate_rxtx &&
499 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
500 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
501 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
502 local_port);
503 return -EINVAL;
504 }
505 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
506 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
507 local_port);
508 return -EINVAL;
509 }
510 }
511
512 port_mapping->module = module;
513 port_mapping->width = width;
514 port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
515 return 0;
516}
517
518static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
519{
520 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
521 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
522 char pmlp_pl[MLXSW_REG_PMLP_LEN];
523 int i;
524
525 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
526 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
527 for (i = 0; i < port_mapping->width; i++) {
528 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
529 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i);
530 }
531
532 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
533}
534
535static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
536{
537 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
538 char pmlp_pl[MLXSW_REG_PMLP_LEN];
539
540 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
541 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
542 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
543}
544
545static int mlxsw_sp_port_open(struct net_device *dev)
546{
547 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
548 int err;
549
550 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
551 if (err)
552 return err;
553 netif_start_queue(dev);
554 return 0;
555}
556
557static int mlxsw_sp_port_stop(struct net_device *dev)
558{
559 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
560
561 netif_stop_queue(dev);
562 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
563}
564
/* ndo_start_xmit: send @skb to the device as a control packet directed
 * at this port, with a Tx header prepended.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Make room for the Tx header; drop the packet if that fails. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		/* eth_skb_pad() already freed the skb on failure. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* The Tx header is consumed by the hardware on the way, so do not
	 * count its bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Despite the busy-check above, the transmit may still fail here
	 * because of a racing queue-full condition; in that case the
	 * packet is simply dropped.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
617
/* Intentionally a no-op: presumably no RX-mode programming is needed
 * for this device; the callback exists to satisfy .ndo_set_rx_mode.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
621
622static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
623{
624 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
625 struct sockaddr *addr = p;
626 int err;
627
628 if (!is_valid_ether_addr(addr->sa_data))
629 return -EADDRNOTAVAIL;
630
631 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
632 if (err)
633 return err;
634 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
635 return 0;
636}
637
638static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
639{
640 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
641 struct mlxsw_sp_hdroom orig_hdroom;
642 struct mlxsw_sp_hdroom hdroom;
643 int err;
644
645 orig_hdroom = *mlxsw_sp_port->hdroom;
646
647 hdroom = orig_hdroom;
648 hdroom.mtu = mtu;
649 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
650
651 err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
652 if (err) {
653 netdev_err(dev, "Failed to configure port's headroom\n");
654 return err;
655 }
656
657 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
658 if (err)
659 goto err_port_mtu_set;
660 dev->mtu = mtu;
661 return 0;
662
663err_port_mtu_set:
664 mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
665 return err;
666}
667
/* Aggregate the per-CPU software (CPU path) counters into @stats.
 * The 64-bit counters are read under the u64_stats seqcount retry loop
 * so they are consistent even on 32-bit hosts. Always returns 0.
 * Assumes @stats arrives zero-initialized (values are accumulated).
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;

		/* NOTE(review): tx_dropped is read outside the retry loop;
		 * presumably acceptable since it is updated with
		 * this_cpu_inc() — confirm.
		 */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
699
700static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
701{
702 switch (attr_id) {
703 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
704 return true;
705 }
706
707 return false;
708}
709
710static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
711 void *sp)
712{
713 switch (attr_id) {
714 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
715 return mlxsw_sp_port_get_sw_stats64(dev, sp);
716 }
717
718 return -EINVAL;
719}
720
/* Query a raw PPCNT counter group for this port into the caller-provided
 * payload buffer. @grp selects the counter group, @prio the priority/TC
 * index within it.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
730
731static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
732 struct rtnl_link_stats64 *stats)
733{
734 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
735 int err;
736
737 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
738 0, ppcnt_pl);
739 if (err)
740 goto out;
741
742 stats->tx_packets =
743 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
744 stats->rx_packets =
745 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
746 stats->tx_bytes =
747 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
748 stats->rx_bytes =
749 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
750 stats->multicast =
751 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
752
753 stats->rx_crc_errors =
754 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
755 stats->rx_frame_errors =
756 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
757
758 stats->rx_length_errors = (
759 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
760 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
761 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
762
763 stats->rx_errors = (stats->rx_crc_errors +
764 stats->rx_frame_errors + stats->rx_length_errors);
765
766out:
767 return err;
768}
769
/* Refresh extended per-port statistics: ECN marks, per-TC congestion
 * and transmit-queue counters, and per-priority Tx counters. Individual
 * query failures are skipped, so one failing counter group does not
 * prevent the others from updating.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	/* Per-TC counters: WRED drops, queue backlog and tail drops. */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	/* Per-priority Tx packet/byte counters. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
811
/* Periodic worker refreshing the cached hardware statistics served by
 * mlxsw_sp_port_get_stats64().
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Nothing to refresh while the carrier is down, but keep
		 * the work rescheduled so the cache resumes updating when
		 * the link comes back.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
833
834
835
836
/* ndo_get_stats64: serve statistics from the cache that
 * update_stats_cache() refreshes periodically rather than querying the
 * hardware here — presumably because this callback must not block.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}
845
846static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
847 u16 vid_begin, u16 vid_end,
848 bool is_member, bool untagged)
849{
850 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
851 char *spvm_pl;
852 int err;
853
854 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
855 if (!spvm_pl)
856 return -ENOMEM;
857
858 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
859 vid_end, is_member, untagged);
860 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
861 kfree(spvm_pl);
862 return err;
863}
864
865int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
866 u16 vid_end, bool is_member, bool untagged)
867{
868 u16 vid, vid_e;
869 int err;
870
871 for (vid = vid_begin; vid <= vid_end;
872 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
873 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
874 vid_end);
875
876 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
877 is_member, untagged);
878 if (err)
879 return err;
880 }
881
882 return 0;
883}
884
885static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
886 bool flush_default)
887{
888 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
889
890 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
891 &mlxsw_sp_port->vlans_list, list) {
892 if (!flush_default &&
893 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
894 continue;
895 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
896 }
897}
898
/* Detach the {port, VLAN} from whatever construct currently owns it:
 * a bridge port binding takes precedence over a router FID binding.
 */
static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}
907
908struct mlxsw_sp_port_vlan *
909mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
910{
911 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
912 bool untagged = vid == MLXSW_SP_DEFAULT_VID;
913 int err;
914
915 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
916 if (mlxsw_sp_port_vlan)
917 return ERR_PTR(-EEXIST);
918
919 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
920 if (err)
921 return ERR_PTR(err);
922
923 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
924 if (!mlxsw_sp_port_vlan) {
925 err = -ENOMEM;
926 goto err_port_vlan_alloc;
927 }
928
929 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
930 mlxsw_sp_port_vlan->vid = vid;
931 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
932
933 return mlxsw_sp_port_vlan;
934
935err_port_vlan_alloc:
936 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
937 return ERR_PTR(err);
938}
939
/* Tear down a {port, VLAN}: leave any bridge/router construct, unlink
 * and free the software entry, then drop the VID from the port's VLAN
 * membership. The VID is saved up front since the entry is freed before
 * the final membership update.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
950
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VID 0 is added to the HW filter when the device goes up, but
	 * it is reserved in our case, so simply return success.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}
964
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VID 0 was never added (see mlxsw_sp_port_add_vid()), so there
	 * is nothing to remove for it.
	 */
	if (!vid)
		return 0;

	/* A VID the driver does not track is not an error on removal. */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}
984
/* Bind/unbind a flow block to the port. clsact ingress and egress share
 * one handler, differentiated by the boolean 'ingress' argument; RED
 * early-drop qevent blocks have a dedicated handler.
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}
999
/* ndo_setup_tc: dispatch TC offload requests to the per-qdisc/per-block
 * handlers. Unsupported setup types are rejected with -EOPNOTSUPP.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
1022
1023static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1024{
1025 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1026
1027 if (!enable) {
1028 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1029 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1030 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1031 return -EINVAL;
1032 }
1033 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1034 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1035 } else {
1036 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1037 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1038 }
1039 return 0;
1040}
1041
/* Toggle physical-layer loopback via the PPLR register. If the netdev
 * is running, the port is administratively taken down for the duration
 * of the change and brought back up afterwards.
 * NOTE(review): errors from the admin down/up toggles are ignored; only
 * the PPLR write result is returned.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1060
1061typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1062
1063static int mlxsw_sp_handle_feature(struct net_device *dev,
1064 netdev_features_t wanted_features,
1065 netdev_features_t feature,
1066 mlxsw_sp_feature_handler feature_handler)
1067{
1068 netdev_features_t changes = wanted_features ^ dev->features;
1069 bool enable = !!(wanted_features & feature);
1070 int err;
1071
1072 if (!(changes & feature))
1073 return 0;
1074
1075 err = feature_handler(dev, enable);
1076 if (err) {
1077 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1078 enable ? "Enable" : "Disable", &feature, err);
1079 return err;
1080 }
1081
1082 if (enable)
1083 dev->features |= feature;
1084 else
1085 dev->features &= ~feature;
1086
1087 return 0;
1088}
/* ndo_set_features: apply each supported feature through its handler.
 * Errors are OR-ed together so every handler still runs; on any failure
 * the previous features word is restored and -EINVAL is reported (the
 * individual errno is not propagated).
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
1107
1108static struct devlink_port *
1109mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1110{
1111 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1112 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1113
1114 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1115 mlxsw_sp_port->local_port);
1116}
1117
1118static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1119 struct ifreq *ifr)
1120{
1121 struct hwtstamp_config config;
1122 int err;
1123
1124 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1125 return -EFAULT;
1126
1127 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1128 &config);
1129 if (err)
1130 return err;
1131
1132 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1133 return -EFAULT;
1134
1135 return 0;
1136}
1137
1138static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1139 struct ifreq *ifr)
1140{
1141 struct hwtstamp_config config;
1142 int err;
1143
1144 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1145 &config);
1146 if (err)
1147 return err;
1148
1149 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1150 return -EFAULT;
1151
1152 return 0;
1153}
1154
/* Reset the port's hardware timestamping to the all-zero (disabled)
 * configuration, e.g. during port removal. The return value is
 * deliberately ignored; this is best-effort cleanup.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1161
1162static int
1163mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1164{
1165 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1166
1167 switch (cmd) {
1168 case SIOCSHWTSTAMP:
1169 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1170 case SIOCGHWTSTAMP:
1171 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1172 default:
1173 return -EOPNOTSUPP;
1174 }
1175}
1176
/* Netdev operations for Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};
1194
1195static int
1196mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
1197{
1198 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1199 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
1200 const struct mlxsw_sp_port_type_speed_ops *ops;
1201 char ptys_pl[MLXSW_REG_PTYS_LEN];
1202 u32 eth_proto_cap_masked;
1203 int err;
1204
1205 ops = mlxsw_sp->port_type_speed_ops;
1206
1207
1208
1209
1210 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1211 0, false);
1212 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1213 if (err)
1214 return err;
1215
1216 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap,
1217 ð_proto_admin, ð_proto_oper);
1218 eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
1219 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1220 eth_proto_cap_masked,
1221 mlxsw_sp_port->link.autoneg);
1222 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1223}
1224
1225int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1226{
1227 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1228 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1229 char ptys_pl[MLXSW_REG_PTYS_LEN];
1230 u32 eth_proto_oper;
1231 int err;
1232
1233 port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1234 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1235 mlxsw_sp_port->local_port, 0,
1236 false);
1237 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1238 if (err)
1239 return err;
1240 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1241 ð_proto_oper);
1242 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1243 return 0;
1244}
1245
/* Configure one element of the port's ETS scheduling hierarchy via QEEC:
 * link element @index at hierarchy level @hr to @next_index one level up,
 * and set its DWRR participation and weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* de: DWRR configuration in this payload is valid and should apply. */
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1260
/* Set the maximum shaper (rate @maxrate, burst size @burst_size) on ETS
 * element @index at hierarchy level @hr via QEEC.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* mase: max shaper configuration in this payload is valid. */
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1275
/* Set the minimum shaper (rate @minrate) on ETS element @index at
 * hierarchy level @hr via QEEC.
 */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* mise: min shaper configuration in this payload is valid. */
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1290
/* Map switch priority @switch_prio to traffic class @tclass on the port
 * via the QTCT register.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1301
/* Bring the port's ETS scheduling hierarchy to a known initial state:
 * build the group/subgroup/TC element tree, disable all max shapers,
 * set a minimum shaper on the multicast TCs and map every priority to
 * traffic class 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy: one group, with one subgroup per
	 * TC, and two TC elements (unicast i, multicast i + 8) linked to
	 * each subgroup.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* Multicast TC (i + 8) uses DWRR with full weight. */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it (port, subgroups and both unicast and multicast TC
	 * elements).
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for the multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1387
/* Enable/disable the traffic-class multicast mode for the port via the
 * QTCTM register.
 */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
1397
/* Snapshot the module's current overheat counter so later readings can
 * be reported relative to this baseline.
 */
static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
						    &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}
1413
/* Configure which EtherTypes the port classifies as VLAN-tagged via the
 * SPVC register: @is_8021ad_tagged covers 802.1ad (0x88A8) and
 * @is_8021q_tagged covers 802.1Q (0x8100) tags.
 */
int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
1426
/* Create and register a netdev for @local_port.
 *
 * @split_base_local_port: first local port of the split cluster when
 *	this is a split port, zero otherwise.
 * @port_mapping: module/lane mapping to create the port with.
 *
 * On success the port is published in mlxsw_sp->ports[] and its
 * periodic HW-stats work is scheduled. On failure everything set up so
 * far is unwound in reverse order via the goto ladder at the bottom.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	/* Only a full-width port that is not itself a split port can be
	 * split further.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* NOTE: keep DCB init after buffers and ETS init above. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* By default, only 802.1q (0x8100) tagged packets are treated as
	 * tagged; 802.1ad classification is off (is_8021ad_tagged=false).
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

/* Error unwind: mirror the setup steps above, in reverse order. */
err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}
1708
/* Tear down a port created by mlxsw_sp_port_create(), undoing the setup
 * steps in reverse order.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop deferred work before the netdev goes away. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLANs should have been flushed above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
1734
/* Create the (netdev-less) CPU port object and register it with the
 * core. Published in mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] on success.
 */
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}
1763
1764static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1765{
1766 struct mlxsw_sp_port *mlxsw_sp_port =
1767 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1768
1769 mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1770 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1771 kfree(mlxsw_sp_port);
1772}
1773
1774static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1775{
1776 return mlxsw_sp->ports[local_port] != NULL;
1777}
1778
/* Remove all front-panel ports (index 0 is unused), then the CPU port,
 * and release the ports array.
 */
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}
1790
/* Allocate the ports array, create the CPU port and then every
 * front-panel port that has a discovered module mapping. On failure,
 * previously created ports are removed in reverse order.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	/* Local port 0 is reserved; ports without a mapping are skipped. */
	for (i = 1; i < max_ports; i++) {
		port_mapping = mlxsw_sp->port_mapping[i];
		if (!port_mapping)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
1828
/* Discover and cache the module/lane mapping of every local port in
 * mlxsw_sp->port_mapping[]. Ports with zero width (no module) and XM
 * ports get a NULL entry. On failure all cached entries and the array
 * itself are freed.
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
			continue;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if (err)
			goto err_port_module_info_get;
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	/* Free entries [1, i - 1]; entry i was never populated. */
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}
1869
1870static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
1871{
1872 int i;
1873
1874 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1875 kfree(mlxsw_sp->port_mapping[i]);
1876 kfree(mlxsw_sp->port_mapping);
1877}
1878
1879static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
1880{
1881 u8 offset = (local_port - 1) % max_width;
1882
1883 return local_port - offset;
1884}
1885
/* Create @count split ports starting at @base_port, spaced @offset
 * local ports apart. Each split port gets an equal share of the parent
 * port's width and consecutive lane ranges. On failure, ports already
 * created here are removed again.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
					   base_port, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Next split port starts at the following lane group. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
	return err;
}
1912
/* Re-create the original unsplit ports in the local-port range that was
 * occupied by a split group. Creation is best-effort: a failure to
 * re-create one port should not prevent the others, so the return value
 * of mlxsw_sp_port_create() is deliberately ignored.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port,
					 unsigned int count, u8 offset)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over the original unsplit ports in the gap and re-create them. */
	for (i = 0; i < count * offset; i++) {
		port_mapping = mlxsw_sp->port_mapping[base_port + i];
		if (!port_mapping)
			continue;
		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
	}
}
1928
1929static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
1930 unsigned int count,
1931 unsigned int max_width)
1932{
1933 enum mlxsw_res_id local_ports_in_x_res_id;
1934 int split_width = max_width / count;
1935
1936 if (split_width == 1)
1937 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
1938 else if (split_width == 2)
1939 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
1940 else if (split_width == 4)
1941 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
1942 else
1943 return -EINVAL;
1944
1945 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
1946 return -EINVAL;
1947 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
1948}
1949
1950static struct mlxsw_sp_port *
1951mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1952{
1953 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
1954 return mlxsw_sp->ports[local_port];
1955 return NULL;
1956}
1957
/* devlink port split handler: split @local_port into @count ports.
 *
 * Validates the request (port exists, is at full module width, split
 * offset is known and the target local ports are free), removes the
 * parent port and creates the split ports in its place. On failure the
 * original unsplit ports are re-created best-effort.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Only a port using all of its module's lanes can be split. */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* When splitting to the maximum number of ports the whole cluster
	 * is consumed, so the split starts at the cluster's base port;
	 * otherwise it starts at the port being split.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* Skip the port being split itself and, for a max split,
		 * its cluster sibling in the middle — both are removed
		 * below anyway. Any other occupied local port makes the
		 * requested split impossible.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Save the mapping before the port structure is freed below. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}
2041
/* devlink port unsplit handler: remove the split ports of the group
 * @local_port belongs to and re-create the original unsplit ports.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Recover the split count from the port's share of the module. */
	count = max_width / mlxsw_sp_port->mapping.width;

	/* The offset was valid when the port was split, so a failure here
	 * indicates an internal inconsistency — hence the WARN_ON.
	 */
	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}
2094
/* Zero the cached per-queue backlog counters when the port goes down,
 * as the hardware queues are drained at that point.
 */
static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}
2103
/* PUDE (port up/down event) handler: propagate the operational status
 * reported by the hardware to the netdev carrier state. On link-up the
 * PTP shaper work is (re)scheduled; on link-down the cached queue
 * backlog counters are wiped.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	/* The port may already have been removed. */
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}
2128
2129static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2130 char *mtpptr_pl, bool ingress)
2131{
2132 u8 local_port;
2133 u8 num_rec;
2134 int i;
2135
2136 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2137 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2138 for (i = 0; i < num_rec; i++) {
2139 u8 domain_number;
2140 u8 message_type;
2141 u16 sequence_id;
2142 u64 timestamp;
2143
2144 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2145 &domain_number, &sequence_id,
2146 ×tamp);
2147 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2148 message_type, domain_number,
2149 sequence_id, timestamp);
2150 }
2151}
2152
2153static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2154 char *mtpptr_pl, void *priv)
2155{
2156 struct mlxsw_sp *mlxsw_sp = priv;
2157
2158 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2159}
2160
2161static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2162 char *mtpptr_pl, void *priv)
2163{
2164 struct mlxsw_sp *mlxsw_sp = priv;
2165
2166 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2167}
2168
2169void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2170 u8 local_port, void *priv)
2171{
2172 struct mlxsw_sp *mlxsw_sp = priv;
2173 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2174 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2175
2176 if (unlikely(!mlxsw_sp_port)) {
2177 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2178 local_port);
2179 return;
2180 }
2181
2182 skb->dev = mlxsw_sp_port->dev;
2183
2184 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2185 u64_stats_update_begin(&pcpu_stats->syncp);
2186 pcpu_stats->rx_packets++;
2187 pcpu_stats->rx_bytes += skb->len;
2188 u64_stats_update_end(&pcpu_stats->syncp);
2189
2190 skb->protocol = eth_type_trans(skb, skb->dev);
2191 netif_receive_skb(skb);
2192}
2193
2194static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2195 void *priv)
2196{
2197 skb->offload_fwd_mark = 1;
2198 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2199}
2200
2201static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2202 u8 local_port, void *priv)
2203{
2204 skb->offload_l3_fwd_mark = 1;
2205 skb->offload_fwd_mark = 1;
2206 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2207}
2208
/* Dispatch a trapped PTP packet to the ASIC-generation specific handler. */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u8 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2214
/* Handle a packet sampled by the hardware: pass it to the psample
 * subsystem if sampling is still enabled on the ingress port, then
 * consume the skb - sampled packets are never injected into the stack.
 */
void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			     u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_sample *sample;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}

	/* The sample state is RCU-protected; it may be cleared concurrently
	 * when sampling is disabled on the port.
	 */
	rcu_read_lock();
	sample = rcu_dereference(mlxsw_sp_port->sample);
	if (!sample)
		goto out_unlock;
	/* Report only the truncated length when truncation is configured. */
	size = sample->truncate ? sample->trunc_size : skb->len;
	psample_sample_packet(sample->psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
2240
/* RX listener that delivers packets without any offload marking. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that sets skb->offload_fwd_mark before delivery. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that sets both offload_fwd_mark and offload_l3_fwd_mark. */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener in the Spectrum event trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2255
/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2281
/* Traps and events specific to Spectrum-1: PTP timestamps are delivered
 * through the egress/ingress timestamp FIFO events.
 */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2287
2288static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2289{
2290 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2291 char qpcr_pl[MLXSW_REG_QPCR_LEN];
2292 enum mlxsw_reg_qpcr_ir_units ir_units;
2293 int max_cpu_policers;
2294 bool is_bytes;
2295 u8 burst_size;
2296 u32 rate;
2297 int i, err;
2298
2299 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2300 return -EIO;
2301
2302 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2303
2304 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2305 for (i = 0; i < max_cpu_policers; i++) {
2306 is_bytes = false;
2307 switch (i) {
2308 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2309 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2310 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2311 rate = 1024;
2312 burst_size = 7;
2313 break;
2314 default:
2315 continue;
2316 }
2317
2318 __set_bit(i, mlxsw_sp->trap->policers_usage);
2319 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2320 burst_size);
2321 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
2322 if (err)
2323 return err;
2324 }
2325
2326 return 0;
2327}
2328
2329static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
2330{
2331 char htgt_pl[MLXSW_REG_HTGT_LEN];
2332 enum mlxsw_reg_htgt_trap_group i;
2333 int max_cpu_policers;
2334 int max_trap_groups;
2335 u8 priority, tc;
2336 u16 policer_id;
2337 int err;
2338
2339 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
2340 return -EIO;
2341
2342 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
2343 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2344
2345 for (i = 0; i < max_trap_groups; i++) {
2346 policer_id = i;
2347 switch (i) {
2348 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2349 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2350 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2351 priority = 1;
2352 tc = 1;
2353 break;
2354 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
2355 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
2356 tc = MLXSW_REG_HTGT_DEFAULT_TC;
2357 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
2358 break;
2359 default:
2360 continue;
2361 }
2362
2363 if (max_cpu_policers <= policer_id &&
2364 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
2365 return -EIO;
2366
2367 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
2368 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2369 if (err)
2370 return err;
2371 }
2372
2373 return 0;
2374}
2375
2376static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2377 const struct mlxsw_listener listeners[],
2378 size_t listeners_count)
2379{
2380 int i;
2381 int err;
2382
2383 for (i = 0; i < listeners_count; i++) {
2384 err = mlxsw_core_trap_register(mlxsw_sp->core,
2385 &listeners[i],
2386 mlxsw_sp);
2387 if (err)
2388 goto err_listener_register;
2389
2390 }
2391 return 0;
2392
2393err_listener_register:
2394 for (i--; i >= 0; i--) {
2395 mlxsw_core_trap_unregister(mlxsw_sp->core,
2396 &listeners[i],
2397 mlxsw_sp);
2398 }
2399 return err;
2400}
2401
2402static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2403 const struct mlxsw_listener listeners[],
2404 size_t listeners_count)
2405{
2406 int i;
2407
2408 for (i = 0; i < listeners_count; i++) {
2409 mlxsw_core_trap_unregister(mlxsw_sp->core,
2410 &listeners[i],
2411 mlxsw_sp);
2412 }
2413}
2414
/* Set up trap handling: allocate the trap state (sized for the device's
 * policer count), program CPU policers and trap groups, then register
 * first the common listeners and then the per-ASIC ones. Unwound in
 * reverse order on error; must be kept in sync with mlxsw_sp_traps_fini().
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* policers_usage is a bitmap with one bit per policer. */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	/* mlxsw_sp->trap must be set before this: it marks used policers. */
	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Per-ASIC extra listeners (may be an empty list). */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2460
/* Tear down trap handling in reverse order of mlxsw_sp_traps_init(). */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}
2469
2470#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2471
2472static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2473{
2474 char slcr_pl[MLXSW_REG_SLCR_LEN];
2475 u32 seed;
2476 int err;
2477
2478 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2479 MLXSW_SP_LAG_SEED_INIT);
2480 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2481 MLXSW_REG_SLCR_LAG_HASH_DMAC |
2482 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2483 MLXSW_REG_SLCR_LAG_HASH_VLANID |
2484 MLXSW_REG_SLCR_LAG_HASH_SIP |
2485 MLXSW_REG_SLCR_LAG_HASH_DIP |
2486 MLXSW_REG_SLCR_LAG_HASH_SPORT |
2487 MLXSW_REG_SLCR_LAG_HASH_DPORT |
2488 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2489 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2490 if (err)
2491 return err;
2492
2493 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
2494 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2495 return -EIO;
2496
2497 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
2498 sizeof(struct mlxsw_sp_upper),
2499 GFP_KERNEL);
2500 if (!mlxsw_sp->lags)
2501 return -ENOMEM;
2502
2503 return 0;
2504}
2505
/* Free the per-LAG tracking array allocated in mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2510
2511static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2512{
2513 char htgt_pl[MLXSW_REG_HTGT_LEN];
2514 int err;
2515
2516 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2517 MLXSW_REG_HTGT_INVALID_POLICER,
2518 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2519 MLXSW_REG_HTGT_DEFAULT_TC);
2520 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2521 if (err)
2522 return err;
2523
2524 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
2525 MLXSW_REG_HTGT_INVALID_POLICER,
2526 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2527 MLXSW_REG_HTGT_DEFAULT_TC);
2528 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2529 if (err)
2530 return err;
2531
2532 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
2533 MLXSW_REG_HTGT_INVALID_POLICER,
2534 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2535 MLXSW_REG_HTGT_DEFAULT_TC);
2536 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2537 if (err)
2538 return err;
2539
2540 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
2541 MLXSW_REG_HTGT_INVALID_POLICER,
2542 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2543 MLXSW_REG_HTGT_DEFAULT_TC);
2544 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2545}
2546
/* PTP operations for Spectrum-1. Timestamps are delivered via the
 * PTP_EGR_FIFO/PTP_ING_FIFO events (see mlxsw_sp1_listener above).
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};
2562
/* PTP operations for Spectrum-2; also used by Spectrum-3 (see
 * mlxsw_sp3_init below).
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};
2578
/* Forward declaration; the notifier is registered in mlxsw_sp_init(). */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);
2581
/* Common initialization path for all Spectrum generations. Runs after the
 * per-ASIC init (mlxsw_sp[123]_init) has populated the operation tables.
 * Subsystems are brought up in dependency order; on error they are torn
 * down in reverse. Keep the unwind order in sync with mlxsw_sp_fini().
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* SPAN comes up before switchdev and the router below.
	 * NOTE(review): ordering appears intentional (both may interact
	 * with SPAN); confirm before reordering.
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* A NULL clock is a valid outcome: the rest of the code
		 * only acts on a non-NULL mlxsw_sp->clock.
		 */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* PTP state is only initialized when a clock exists. */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* The netdevice notifier is registered after the router and SPAN
	 * are initialized, so the event handler can use structures those
	 * subsystems own.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	/* Ports are created last: at this point every subsystem a port
	 * may use is already up.
	 */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
2784
/* Bind the Spectrum-1 specific operation tables and extra listeners,
 * then run the common initialization.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	/* SP1 registers additional PTP FIFO event listeners. */
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2813
/* Bind the Spectrum-2 specific operation tables, then run the common
 * initialization. No extra listeners are needed on SP2.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2840
/* Bind the Spectrum-3 operation tables, then run the common
 * initialization. SP3 reuses most SP2 tables, differing only in the
 * shared-buffer ops, SPAN ops and lowest shaper burst size.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2867
/* Tear everything down in the exact reverse order of mlxsw_sp_init();
 * this mirrors the error unwind path there and must stay in sync with it.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP was only initialized when a clock exists. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
2896
2897
2898
2899
/* Size of each FID flood table: covers the 802.1d FIDs plus the 802.1Q
 * VID space. NOTE(review): the exact "- 1" offset is assumed to match
 * the device's FID numbering - confirm against spectrum_fid.c.
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)
2902
/* Device configuration profile for Spectrum-1. SP1 partitions the KVD
 * into linear/hash-single/hash-double parts (used_kvd_sizes).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2926
/* Device configuration profile for Spectrum-2 and later. Unlike SP1,
 * the KVD is not explicitly partitioned; the XLT cache mode is set
 * instead.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvh_xlt_cache_mode	= 1,
	.kvh_xlt_cache_mode		= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2948
/* Prepare devlink size parameters for the KVD resource and its three
 * sub-parts. Each part may grow up to the total KVD size minus the
 * minimum sizes the other parts require.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* The total KVD size is fixed (min == max). */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
2984
/* Register the Spectrum-1 KVD devlink resource tree: the KVD itself plus
 * its linear, hash-double and hash-single sub-resources. Default sizes
 * are derived from the SP1 config profile.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* Split what remains after the linear part between hash-double and
	 * hash-single according to the profile's parts ratio, rounded down
	 * to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}
3050
3051static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3052{
3053 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3054 struct devlink_resource_size_params kvd_size_params;
3055 u32 kvd_size;
3056
3057 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3058 return -EIO;
3059
3060 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3061 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3062 MLXSW_SP_KVD_GRANULARITY,
3063 DEVLINK_RESOURCE_UNIT_ENTRY);
3064
3065 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3066 kvd_size, MLXSW_SP_RESOURCE_KVD,
3067 DEVLINK_RESOURCE_ID_PARENT_TOP,
3068 &kvd_size_params);
3069}
3070
3071static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3072{
3073 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3074 struct devlink_resource_size_params span_size_params;
3075 u32 max_span;
3076
3077 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3078 return -EIO;
3079
3080 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3081 devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3082 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3083
3084 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3085 max_span, MLXSW_SP_RESOURCE_SPAN,
3086 DEVLINK_RESOURCE_ID_PARENT_TOP,
3087 &span_size_params);
3088}
3089
3090static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3091{
3092 int err;
3093
3094 err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3095 if (err)
3096 return err;
3097
3098 err = mlxsw_sp_resources_span_register(mlxsw_core);
3099 if (err)
3100 goto err_resources_span_register;
3101
3102 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3103 if (err)
3104 goto err_resources_counter_register;
3105
3106 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3107 if (err)
3108 goto err_resources_counter_register;
3109
3110 return 0;
3111
3112err_resources_counter_register:
3113err_resources_span_register:
3114 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3115 return err;
3116}
3117
3118static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3119{
3120 int err;
3121
3122 err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3123 if (err)
3124 return err;
3125
3126 err = mlxsw_sp_resources_span_register(mlxsw_core);
3127 if (err)
3128 goto err_resources_span_register;
3129
3130 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3131 if (err)
3132 goto err_resources_counter_register;
3133
3134 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3135 if (err)
3136 goto err_resources_counter_register;
3137
3138 return 0;
3139
3140err_resources_counter_register:
3141err_resources_span_register:
3142 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3143 return err;
3144}
3145
/* Resolve the KVD partition sizes, preferring any sizes the user set via
 * devlink resources and falling back to values derived from the config
 * profile when a resource size is unavailable. The results are validated
 * against the device minimums before being returned.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* devlink_resource_size_get() fails when the resource size was not
	 * configured by the user; each failure below falls back to the
	 * profile-derived default rather than propagating the error.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		/* Same ratio split as in mlxsw_sp1_resources_kvd_register(). */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Sanity: the resulting sizes must respect the device minimums
	 * and fit within the total KVD.
	 */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3200
3201static int
3202mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3203 struct devlink_param_gset_ctx *ctx)
3204{
3205 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3206 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3207
3208 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3209 return 0;
3210}
3211
3212static int
3213mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3214 struct devlink_param_gset_ctx *ctx)
3215{
3216 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3217 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3218
3219 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3220}
3221
/* devlink parameters specific to Spectrum-2 and later ASICs. The ACL
 * region rehash interval is runtime-configurable only and has no
 * validation callback (last argument NULL).
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};
3231
3232static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3233{
3234 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3235 union devlink_param_value value;
3236 int err;
3237
3238 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
3239 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3240 if (err)
3241 return err;
3242
3243 value.vu32 = 0;
3244 devlink_param_driverinit_value_set(devlink,
3245 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3246 value);
3247 return 0;
3248}
3249
3250static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3251{
3252 devlink_params_unregister(priv_to_devlink(mlxsw_core),
3253 mlxsw_sp2_devlink_params,
3254 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3255}
3256
/* Core callback invoked for skbs whose transmission completed and that
 * may need a Tx timestamp. Strips the mlxsw Tx header before handing the
 * skb to the ASIC-specific PTP implementation.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3265
/* Driver description registered with mlxsw core for Spectrum-1 devices.
 * Requires firmware mlxsw_sp1_fw_rev and flashes MLXSW_SP1_FW_FILENAME
 * when needed. Spectrum-1 is the only generation with configurable KVD
 * partitions, hence the kvd_sizes_get callback.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp1_fw_rev,
	.fw_filename = MLXSW_SP1_FW_FILENAME,
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	/* devlink shared buffer operations. */
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	/* devlink trap and trap-policer operations. */
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.res_query_enabled = true,
	.fw_fatal_enabled = true,
	.temp_warn_enabled = true,
};
3305
/* Driver description for Spectrum-2 devices. Differs from Spectrum-1 in
 * init/resources/profile hooks and in registering the devlink parameters
 * (ACL region rehash interval); Spectrum-2 has no configurable KVD
 * partitions, so no kvd_sizes_get.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp2_fw_rev,
	.fw_filename = MLXSW_SP2_FW_FILENAME,
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
	.fw_fatal_enabled = true,
	.temp_warn_enabled = true,
};
3346
/* Driver description for Spectrum-3 devices. Apart from its own name,
 * firmware and init hook, it intentionally reuses the Spectrum-2
 * resources, parameters and config profile.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp3_fw_rev,
	.fw_filename = MLXSW_SP3_FW_FILENAME,
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
	.fw_fatal_enabled = true,
	.temp_warn_enabled = true,
};
3387
3388bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3389{
3390 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3391}
3392
3393static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
3394 struct netdev_nested_priv *priv)
3395{
3396 int ret = 0;
3397
3398 if (mlxsw_sp_port_dev_check(lower_dev)) {
3399 priv->data = (void *)netdev_priv(lower_dev);
3400 ret = 1;
3401 }
3402
3403 return ret;
3404}
3405
3406struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3407{
3408 struct netdev_nested_priv priv = {
3409 .data = NULL,
3410 };
3411
3412 if (mlxsw_sp_port_dev_check(dev))
3413 return netdev_priv(dev);
3414
3415 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
3416
3417 return (struct mlxsw_sp_port *)priv.data;
3418}
3419
3420struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3421{
3422 struct mlxsw_sp_port *mlxsw_sp_port;
3423
3424 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3425 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3426}
3427
3428struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3429{
3430 struct netdev_nested_priv priv = {
3431 .data = NULL,
3432 };
3433
3434 if (mlxsw_sp_port_dev_check(dev))
3435 return netdev_priv(dev);
3436
3437 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3438 &priv);
3439
3440 return (struct mlxsw_sp_port *)priv.data;
3441}
3442
3443struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3444{
3445 struct mlxsw_sp_port *mlxsw_sp_port;
3446
3447 rcu_read_lock();
3448 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3449 if (mlxsw_sp_port)
3450 dev_hold(mlxsw_sp_port->dev);
3451 rcu_read_unlock();
3452 return mlxsw_sp_port;
3453}
3454
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3459
/* When a port leaves a LAG, detach it from every bridge reachable
 * through the LAG: the LAG itself if it is a bridge port, and any of the
 * LAG's upper devices (VLAN uppers) that are bridge ports.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	/* NOTE(review): the _rcu walker is used without an explicit
	 * rcu_read_lock(); presumably RTNL is held on this notifier
	 * path, which also protects the adjacency lists - confirm
	 * against callers.
	 */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
3478
3479static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3480{
3481 char sldr_pl[MLXSW_REG_SLDR_LEN];
3482
3483 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3484 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3485}
3486
3487static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3488{
3489 char sldr_pl[MLXSW_REG_SLDR_LEN];
3490
3491 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3492 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3493}
3494
3495static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3496 u16 lag_id, u8 port_index)
3497{
3498 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3499 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3500
3501 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3502 lag_id, port_index);
3503 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3504}
3505
3506static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3507 u16 lag_id)
3508{
3509 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3510 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3511
3512 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3513 lag_id);
3514 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3515}
3516
3517static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3518 u16 lag_id)
3519{
3520 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3521 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3522
3523 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3524 lag_id);
3525 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3526}
3527
3528static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3529 u16 lag_id)
3530{
3531 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3532 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3533
3534 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3535 lag_id);
3536 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3537}
3538
3539static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3540 struct net_device *lag_dev,
3541 u16 *p_lag_id)
3542{
3543 struct mlxsw_sp_upper *lag;
3544 int free_lag_id = -1;
3545 u64 max_lag;
3546 int i;
3547
3548 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3549 for (i = 0; i < max_lag; i++) {
3550 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3551 if (lag->ref_count) {
3552 if (lag->dev == lag_dev) {
3553 *p_lag_id = i;
3554 return 0;
3555 }
3556 } else if (free_lag_id < 0) {
3557 free_lag_id = i;
3558 }
3559 }
3560 if (free_lag_id < 0)
3561 return -EBUSY;
3562 *p_lag_id = free_lag_id;
3563 return 0;
3564}
3565
3566static bool
3567mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3568 struct net_device *lag_dev,
3569 struct netdev_lag_upper_info *lag_upper_info,
3570 struct netlink_ext_ack *extack)
3571{
3572 u16 lag_id;
3573
3574 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3575 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3576 return false;
3577 }
3578 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3579 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
3580 return false;
3581 }
3582 return true;
3583}
3584
3585static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3586 u16 lag_id, u8 *p_port_index)
3587{
3588 u64 max_lag_members;
3589 int i;
3590
3591 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3592 MAX_LAG_MEMBERS);
3593 for (i = 0; i < max_lag_members; i++) {
3594 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3595 *p_port_index = i;
3596 return 0;
3597 }
3598 }
3599 return -EBUSY;
3600}
3601
3602static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3603 struct net_device *lag_dev,
3604 struct netlink_ext_ack *extack)
3605{
3606 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3607 struct mlxsw_sp_upper *lag;
3608 u16 lag_id;
3609 u8 port_index;
3610 int err;
3611
3612 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3613 if (err)
3614 return err;
3615 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3616 if (!lag->ref_count) {
3617 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3618 if (err)
3619 return err;
3620 lag->dev = lag_dev;
3621 }
3622
3623 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3624 if (err)
3625 return err;
3626 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3627 if (err)
3628 goto err_col_port_add;
3629
3630 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3631 mlxsw_sp_port->local_port);
3632 mlxsw_sp_port->lag_id = lag_id;
3633 mlxsw_sp_port->lagged = 1;
3634 lag->ref_count++;
3635
3636
3637 if (mlxsw_sp_port->default_vlan->fid)
3638 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
3639
3640
3641 err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
3642 lag_dev, extack);
3643 if (err)
3644 goto err_router_join;
3645
3646 return 0;
3647
3648err_router_join:
3649 lag->ref_count--;
3650 mlxsw_sp_port->lagged = 0;
3651 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3652 mlxsw_sp_port->local_port);
3653 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3654err_col_port_add:
3655 if (!lag->ref_count)
3656 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3657 return err;
3658}
3659
/* Detach a port from its LAG and undo mlxsw_sp_port_lag_join(): remove
 * the collector, flush the port's VLANs, leave any bridges reached
 * through the LAG, destroy the hardware LAG when this was its last
 * member and restore the default PVID.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Flush all non-default VLANs and reset the default one; the
	 * port's VLAN state belonged to its life as a LAG member.
	 */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);

	/* Leave any bridge the LAG (or its VLAN uppers) is enslaved to. */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Restore the standalone default PVID on the port. */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
3694
3695static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3696 u16 lag_id)
3697{
3698 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3699 char sldr_pl[MLXSW_REG_SLDR_LEN];
3700
3701 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3702 mlxsw_sp_port->local_port);
3703 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3704}
3705
3706static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3707 u16 lag_id)
3708{
3709 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3710 char sldr_pl[MLXSW_REG_SLDR_LEN];
3711
3712 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3713 mlxsw_sp_port->local_port);
3714 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3715}
3716
3717static int
3718mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
3719{
3720 int err;
3721
3722 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
3723 mlxsw_sp_port->lag_id);
3724 if (err)
3725 return err;
3726
3727 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3728 if (err)
3729 goto err_dist_port_add;
3730
3731 return 0;
3732
3733err_dist_port_add:
3734 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3735 return err;
3736}
3737
3738static int
3739mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
3740{
3741 int err;
3742
3743 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3744 mlxsw_sp_port->lag_id);
3745 if (err)
3746 return err;
3747
3748 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
3749 mlxsw_sp_port->lag_id);
3750 if (err)
3751 goto err_col_port_disable;
3752
3753 return 0;
3754
3755err_col_port_disable:
3756 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3757 return err;
3758}
3759
3760static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3761 struct netdev_lag_lower_state_info *info)
3762{
3763 if (info->tx_enabled)
3764 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
3765 else
3766 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
3767}
3768
3769static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
3770 bool enable)
3771{
3772 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3773 enum mlxsw_reg_spms_state spms_state;
3774 char *spms_pl;
3775 u16 vid;
3776 int err;
3777
3778 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
3779 MLXSW_REG_SPMS_STATE_DISCARDING;
3780
3781 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
3782 if (!spms_pl)
3783 return -ENOMEM;
3784 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
3785
3786 for (vid = 0; vid < VLAN_N_VID; vid++)
3787 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
3788
3789 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
3790 kfree(spms_pl);
3791 return err;
3792}
3793
/* Prepare a port for enslavement to an OVS master: switch to virtual
 * port (VLAN-aware) mode, force all VLANs to forwarding, add VLANs
 * 1..4094 untagged and disable learning on VLANs 1..4095 (OVS does the
 * learning itself). Fully unwound on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* vid is the VLAN that failed; re-enable learning on all the
	 * VLANs that were successfully disabled before it.
	 */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
3828
3829static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3830{
3831 u16 vid;
3832
3833 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
3834 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
3835 vid, true);
3836
3837 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
3838 false, false);
3839 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
3840 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
3841}
3842
3843static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
3844{
3845 unsigned int num_vxlans = 0;
3846 struct net_device *dev;
3847 struct list_head *iter;
3848
3849 netdev_for_each_lower_dev(br_dev, dev, iter) {
3850 if (netif_is_vxlan(dev))
3851 num_vxlans++;
3852 }
3853
3854 return num_vxlans > 1;
3855}
3856
3857static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
3858{
3859 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
3860 struct net_device *dev;
3861 struct list_head *iter;
3862
3863 netdev_for_each_lower_dev(br_dev, dev, iter) {
3864 u16 pvid;
3865 int err;
3866
3867 if (!netif_is_vxlan(dev))
3868 continue;
3869
3870 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
3871 if (err || !pvid)
3872 continue;
3873
3874 if (test_and_set_bit(pvid, vlans))
3875 return false;
3876 }
3877
3878 return true;
3879}
3880
3881static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
3882 struct netlink_ext_ack *extack)
3883{
3884 if (br_multicast_enabled(br_dev)) {
3885 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
3886 return false;
3887 }
3888
3889 if (!br_vlan_enabled(br_dev) &&
3890 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
3891 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
3892 return false;
3893 }
3894
3895 if (br_vlan_enabled(br_dev) &&
3896 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
3897 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
3898 return false;
3899 }
3900
3901 return true;
3902}
3903
/* Handle PRECHANGEUPPER/CHANGEUPPER netdev events for a Spectrum port.
 * PRECHANGEUPPER validates (and may veto, with extack) the requested
 * topology change; CHANGEUPPER applies it: join/leave a bridge, LAG or
 * OVS master, or clean up after a macvlan/VLAN upper is unlinked.
 * lower_dev is the device the notification was received for (the port
 * itself, or a LAG above it), dev is the Spectrum port netdev.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers can be
		 * offloaded on a port.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is always allowed; remaining checks gate
		 * linking only.
		 */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Stacking under a device that itself has uppers is only
		 * supported for bridges already offloaded by us.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		/* Only 802.1Q and 802.1AD VLAN-aware bridges can be
		 * offloaded, and 802.1AD bridges do not support ports
		 * with 802.1Q VLAN uppers.
		 */
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* A VLAN upper is only acted on when it leaves a
			 * bridge it was enslaved to.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4048
4049static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4050 unsigned long event, void *ptr)
4051{
4052 struct netdev_notifier_changelowerstate_info *info;
4053 struct mlxsw_sp_port *mlxsw_sp_port;
4054 int err;
4055
4056 mlxsw_sp_port = netdev_priv(dev);
4057 info = ptr;
4058
4059 switch (event) {
4060 case NETDEV_CHANGELOWERSTATE:
4061 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4062 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4063 info->lower_state_info);
4064 if (err)
4065 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4066 }
4067 break;
4068 }
4069
4070 return 0;
4071}
4072
4073static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4074 struct net_device *port_dev,
4075 unsigned long event, void *ptr)
4076{
4077 switch (event) {
4078 case NETDEV_PRECHANGEUPPER:
4079 case NETDEV_CHANGEUPPER:
4080 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4081 event, ptr);
4082 case NETDEV_CHANGELOWERSTATE:
4083 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4084 ptr);
4085 }
4086
4087 return 0;
4088}
4089
4090static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4091 unsigned long event, void *ptr)
4092{
4093 struct net_device *dev;
4094 struct list_head *iter;
4095 int ret;
4096
4097 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4098 if (mlxsw_sp_port_dev_check(dev)) {
4099 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4100 ptr);
4101 if (ret)
4102 return ret;
4103 }
4104 }
4105
4106 return 0;
4107}
4108
/* Handle PRECHANGEUPPER/CHANGEUPPER events for a VLAN device on top of a
 * Spectrum port (vlan_dev is the VLAN device, dev the underlying port,
 * vid its VLAN). Only bridge and macvlan uppers are supported on such
 * VLAN devices.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is always allowed. */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Stacking under a device with uppers is only supported
		 * for bridges already offloaded by us.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* Should have been rejected in PRECHANGEUPPER. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4175
4176static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4177 struct net_device *lag_dev,
4178 unsigned long event,
4179 void *ptr, u16 vid)
4180{
4181 struct net_device *dev;
4182 struct list_head *iter;
4183 int ret;
4184
4185 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4186 if (mlxsw_sp_port_dev_check(dev)) {
4187 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4188 event, ptr,
4189 vid);
4190 if (ret)
4191 return ret;
4192 }
4193 }
4194
4195 return 0;
4196}
4197
/* Handle a netdev notification for a VLAN upper (@vlan_dev) of a bridge
 * (@br_dev). Only macvlan uppers are allowed on such devices, and only when a
 * router interface exists for the VLAN device. Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	/* Not our bridge VLAN device - nothing to validate or program. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* The macvlan check is redundant here (non-macvlan uppers
		 * were rejected above) but kept for symmetry with the other
		 * PRECHANGEUPPER handlers.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Clean up the macvlan's router state on unlink. */
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
4239
4240static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4241 unsigned long event, void *ptr)
4242{
4243 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4244 u16 vid = vlan_dev_vlan_id(vlan_dev);
4245
4246 if (mlxsw_sp_port_dev_check(real_dev))
4247 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4248 event, ptr, vid);
4249 else if (netif_is_lag_master(real_dev))
4250 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4251 real_dev, event,
4252 ptr, vid);
4253 else if (netif_is_bridge_master(real_dev))
4254 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4255 event, ptr, vid);
4256
4257 return 0;
4258}
4259
/* Handle a netdev notification for a bridge device. Vetoes unsupported uppers
 * (only 802.1q VLAN and macvlan uppers are allowed, and none on top of an
 * 802.1ad bridge) and cleans up router state when an upper is unlinked.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	/* Not a bridge this driver is below - nothing to do. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		/* No uppers of any kind on top of an 802.1ad bridge. */
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		/* VLAN uppers must use the 802.1q protocol. */
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		/* macvlan uppers require a router interface on the bridge. */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* On unlink, tear down any router state tied to the upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
4314
4315static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4316 unsigned long event, void *ptr)
4317{
4318 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4319 struct netdev_notifier_changeupper_info *info = ptr;
4320 struct netlink_ext_ack *extack;
4321
4322 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4323 return 0;
4324
4325 extack = netdev_notifier_info_to_extack(&info->info);
4326
4327
4328 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4329
4330 return -EOPNOTSUPP;
4331}
4332
4333static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4334{
4335 struct netdev_notifier_changeupper_info *info = ptr;
4336
4337 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4338 return false;
4339 return netif_is_l3_master(info->upper_dev);
4340}
4341
/* Handle a netdev notification for a VxLAN device. Joins the VxLAN device to
 * an offloaded VLAN-unaware bridge when it is linked to one (or brought up
 * under one) and leaves the bridge on unlink/down. VLAN-aware bridges are
 * handled elsewhere, when the VNI-to-VLAN mapping is known.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges this driver is not below. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* A down device is joined on NETDEV_PRE_UP instead. */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Device coming up while already enslaved to an offloaded
		 * bridge: join it now (vetoing the UP on failure).
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		/* Device going down while enslaved: leave the bridge. */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
4409
/* Main netdevice notifier callback for the driver. Demultiplexes the event to
 * the handler matching the device type and converts the handler's errno into
 * a notifier return value.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* A mirror-to netdev going away invalidates its SPAN entry; respin
	 * SPAN on every event so its state tracks topology changes.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* The VxLAN check is deliberately not part of the else-if chain below.
	 * NOTE(review): a later branch matching the same dev would overwrite
	 * 'err' from the VxLAN handler -- presumably a VxLAN netdev never
	 * matches the cases below; confirm.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4453
/* Validator notifier for IPv4 address changes (registered via
 * register_inetaddr_validator_notifier() in module init).
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};
4457
/* Validator notifier for IPv6 address changes (registered via
 * register_inet6addr_validator_notifier() in module init).
 */
static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
4461
/* PCI IDs served by the Spectrum (SP1) driver. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

/* Thin PCI driver shell; mlxsw_pci_driver_register() presumably fills in the
 * common probe/remove callbacks -- NOTE(review): confirm against pci.h.
 */
static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};
4471
/* PCI IDs served by the Spectrum-2 (SP2) driver. */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

/* Thin PCI driver shell; common callbacks supplied at registration time. */
static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};
4481
/* PCI IDs served by the Spectrum-3 (SP3) driver. */
static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

/* Thin PCI driver shell; common callbacks supplied at registration time. */
static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};
4491
/* Module entry point: registers the IPv4/IPv6 address-validator notifiers,
 * then the three per-ASIC core drivers (Spectrum/-2/-3) followed by their PCI
 * drivers. On any failure, unwinds everything registered so far in strict
 * reverse order and returns the errno.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

	/* Each label undoes the registrations made before its failure point,
	 * newest first (standard goto-unwind pattern).
	 */
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
4540
/* Module exit: unregister everything in exact reverse order of
 * mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
4552
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Device tables let userspace (modprobe/udev) autoload the module for any
 * supported Spectrum ASIC.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
/* Firmware images matching the revisions declared at the top of the file;
 * NOTE(review): presumably consumed by the fw-flash logic earlier in this
 * file -- confirm.
 */
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
4565