1
2
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/types.h>
7#include <linux/pci.h>
8#include <linux/netdevice.h>
9#include <linux/etherdevice.h>
10#include <linux/ethtool.h>
11#include <linux/slab.h>
12#include <linux/device.h>
13#include <linux/skbuff.h>
14#include <linux/if_vlan.h>
15#include <linux/if_bridge.h>
16#include <linux/workqueue.h>
17#include <linux/jiffies.h>
18#include <linux/bitops.h>
19#include <linux/list.h>
20#include <linux/notifier.h>
21#include <linux/dcbnl.h>
22#include <linux/inetdevice.h>
23#include <linux/netlink.h>
24#include <linux/jhash.h>
25#include <linux/log2.h>
26#include <linux/refcount.h>
27#include <linux/rhashtable.h>
28#include <net/switchdev.h>
29#include <net/pkt_cls.h>
30#include <net/netevent.h>
31#include <net/addrconf.h>
32
33#include "spectrum.h"
34#include "pci.h"
35#include "core.h"
36#include "core_env.h"
37#include "reg.h"
38#include "port.h"
39#include "trap.h"
40#include "txheader.h"
41#include "spectrum_cnt.h"
42#include "spectrum_dpipe.h"
43#include "spectrum_acl_flex_actions.h"
44#include "spectrum_span.h"
45#include "spectrum_ptp.h"
46#include "spectrum_trap.h"
47
/* Minimum required firmware revision for Spectrum-1 and the oldest
 * revision from which an in-driver reset-based FW flash is possible.
 */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 2406
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image file name, derived from the revision above so the two
 * cannot go out of sync.
 */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

/* Minimum required firmware revision for Spectrum-2. */
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 2406

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

/* Minimum required firmware revision for Spectrum-3. */
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 2406

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

/* Per-ASIC masks of the base MAC bits that are fixed; the remaining low
 * bits are available for deriving per-port addresses.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
105
106
107
108
109
/* tx_hdr_version
 * Tx header version; set to MLXSW_TXHDR_VERSION_1 by
 * mlxsw_sp_txhdr_construct().
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type; set to MLXSW_TXHDR_ETH_CTL for Ethernet control
 * packets sent by this driver.
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type; set to MLXSW_TXHDR_PROTO_ETH here.
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Not set by this driver's Tx path.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates whether the 'fid' field is valid; left clear by
 * mlxsw_sp_txhdr_construct().
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID; always 0 for this driver.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * When set, the packet is sent using the control traffic class.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress traffic class. Not set by this driver's Tx path.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port (set to tx_info->local_port in
 * mlxsw_sp_txhdr_construct()).
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Filtering ID; only meaningful when fid_valid is set.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * Packet type; set to MLXSW_TXHDR_TYPE_CONTROL here.
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
173
174int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
175 unsigned int counter_index, u64 *packets,
176 u64 *bytes)
177{
178 char mgpc_pl[MLXSW_REG_MGPC_LEN];
179 int err;
180
181 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
182 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
183 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
184 if (err)
185 return err;
186 if (packets)
187 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
188 if (bytes)
189 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
190 return 0;
191}
192
193static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
194 unsigned int counter_index)
195{
196 char mgpc_pl[MLXSW_REG_MGPC_LEN];
197
198 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
199 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
200 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
201}
202
203int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
204 unsigned int *p_counter_index)
205{
206 int err;
207
208 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
209 p_counter_index);
210 if (err)
211 return err;
212 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
213 if (err)
214 goto err_counter_clear;
215 return 0;
216
217err_counter_clear:
218 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
219 *p_counter_index);
220 return err;
221}
222
/* Return a counter obtained with mlxsw_sp_flow_counter_alloc() to the
 * flow counter sub-pool.
 */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
229
/* Prepend and fill the mlxsw Tx header on an outgoing control packet.
 * The caller must have reserved MLXSW_TXHDR_LEN bytes of headroom
 * (see skb_cow_head() in mlxsw_sp_port_xmit()).
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	/* Control packets are sent on the control traffic class to the
	 * destination local port.
	 */
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
245
246enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
247{
248 switch (state) {
249 case BR_STATE_FORWARDING:
250 return MLXSW_REG_SPMS_STATE_FORWARDING;
251 case BR_STATE_LEARNING:
252 return MLXSW_REG_SPMS_STATE_LEARNING;
253 case BR_STATE_LISTENING:
254 case BR_STATE_DISABLED:
255 case BR_STATE_BLOCKING:
256 return MLXSW_REG_SPMS_STATE_DISCARDING;
257 default:
258 BUG();
259 }
260}
261
262int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
263 u8 state)
264{
265 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
266 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
267 char *spms_pl;
268 int err;
269
270 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
271 if (!spms_pl)
272 return -ENOMEM;
273 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
274 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
275
276 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
277 kfree(spms_pl);
278 return err;
279}
280
281static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
282{
283 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
284 int err;
285
286 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
287 if (err)
288 return err;
289 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
290 return 0;
291}
292
293int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
294 bool is_up)
295{
296 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
297 char paos_pl[MLXSW_REG_PAOS_LEN];
298
299 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
300 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
301 MLXSW_PORT_ADMIN_STATUS_DOWN);
302 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
303}
304
305static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
306 unsigned char *addr)
307{
308 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
309 char ppad_pl[MLXSW_REG_PPAD_LEN];
310
311 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
312 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
313 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
314}
315
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte, then program it to the device.
 * NOTE(review): the addition can wrap within the last byte; presumably
 * the per-ASIC base MAC masks (mlxsw_sp*_mac_mask) reserve enough low
 * bits to prevent collisions - confirm against the masks above.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
325
326static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
327{
328 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
329 char pmtu_pl[MLXSW_REG_PMTU_LEN];
330 int err;
331
332 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
333 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
334 if (err)
335 return err;
336
337 *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
338 return 0;
339}
340
/* Program the port MTU. The device MTU includes the mlxsw Tx header and
 * the Ethernet header, so they are added before validating against the
 * cached maximum (see mlxsw_sp_port_max_mtu_get()).
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
353
354static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
355{
356 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
357 char pspa_pl[MLXSW_REG_PSPA_LEN];
358
359 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
360 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
361}
362
363int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
364{
365 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
366 char svpe_pl[MLXSW_REG_SVPE_LEN];
367
368 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
369 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
370}
371
372int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
373 bool learn_enable)
374{
375 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
376 char *spvmlr_pl;
377 int err;
378
379 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
380 if (!spvmlr_pl)
381 return -ENOMEM;
382 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
383 learn_enable);
384 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
385 kfree(spvmlr_pl);
386 return err;
387}
388
/* Translate a VLAN ethertype to the SVER tag-type encoding used by the
 * SPEVET/SPVID registers: 0 for 802.1Q, 1 for 802.1ad.
 * Returns -EINVAL for any other ethertype; *p_sver_type is then
 * left untouched.
 */
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	if (ethtype == ETH_P_8021Q) {
		*p_sver_type = 0;
		return 0;
	}

	if (ethtype == ETH_P_8021AD) {
		*p_sver_type = 1;
		return 0;
	}

	return -EINVAL;
}
404
405int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
406 u16 ethtype)
407{
408 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
409 char spevet_pl[MLXSW_REG_SPEVET_LEN];
410 u8 sver_type;
411 int err;
412
413 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
414 if (err)
415 return err;
416
417 mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
418 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
419}
420
421static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
422 u16 vid, u16 ethtype)
423{
424 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
425 char spvid_pl[MLXSW_REG_SPVID_LEN];
426 u8 sver_type;
427 int err;
428
429 err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
430 if (err)
431 return err;
432
433 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
434 sver_type);
435
436 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
437}
438
439static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
440 bool allow)
441{
442 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
443 char spaft_pl[MLXSW_REG_SPAFT_LEN];
444
445 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
446 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
447}
448
/* Set the port's PVID. A VID of 0 means "no PVID": untagged reception is
 * disabled instead of programming SPVID. On a partial failure the
 * previously cached PVID is restored in hardware before returning.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		/* No PVID: drop untagged traffic instead. */
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		/* Program the new PVID first, then re-enable untagged
		 * reception; roll back SPVID if the latter fails.
		 */
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}
474
475static int
476mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
477{
478 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
479 char sspr_pl[MLXSW_REG_SSPR_LEN];
480
481 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
482 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
483}
484
/* Read and validate the module/lane mapping of a front-panel port (PMLP
 * register) and fill @port_mapping on success.
 *
 * The driver only supports a restricted mapping shape, rejected with
 * -EINVAL otherwise:
 *  - the lane width is a power of 2 (or 0, i.e. unmapped),
 *  - all lanes belong to the same module,
 *  - TX and RX lane numbers match when they are reported separately,
 *  - lane numbers are sequential starting from 0.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	/* Use lane 0's module as the reference for the whole port. */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
535
536static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
537{
538 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
539 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
540 char pmlp_pl[MLXSW_REG_PMLP_LEN];
541 int i;
542
543 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
544 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
545 for (i = 0; i < port_mapping->width; i++) {
546 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
547 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i);
548 }
549
550 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
551}
552
553static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
554{
555 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
556 char pmlp_pl[MLXSW_REG_PMLP_LEN];
557
558 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
559 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
560 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
561}
562
563static int mlxsw_sp_port_open(struct net_device *dev)
564{
565 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
566 int err;
567
568 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
569 if (err)
570 return err;
571 netif_start_queue(dev);
572 return 0;
573}
574
575static int mlxsw_sp_port_stop(struct net_device *dev)
576{
577 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
578
579 netif_stop_queue(dev);
580 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
581}
582
/* ndo_start_xmit: prepend the mlxsw Tx header and hand the packet to the
 * core for transmission. The skb is consumed (freed or queued) on every
 * path except the NETDEV_TX_BUSY return, where the stack retries with
 * the same skb.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Make sure there is headroom for the Tx header; drop on failure. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	/* Busy check happens before any modification of the skb so the
	 * stack can safely retransmit it as-is.
	 */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);

	/* The Tx header is consumed by the device on the way, so its bytes
	 * are not counted as transmitted.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Record the length before transmission; afterwards the skb may
	 * already be freed.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
635
/* ndo_set_rx_mode: intentionally a no-op for this driver. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
639
640static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
641{
642 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
643 struct sockaddr *addr = p;
644 int err;
645
646 if (!is_valid_ether_addr(addr->sa_data))
647 return -EADDRNOTAVAIL;
648
649 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
650 if (err)
651 return err;
652 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
653 return 0;
654}
655
/* ndo_change_mtu: the port headroom (buffer) configuration depends on
 * the MTU, so it is reconfigured first; if programming the new MTU then
 * fails, the original headroom configuration is restored.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	/* Keep a copy for rollback. */
	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
685
/* Sum the per-CPU software (CPU path) counters of the port into @stats.
 * The rx/tx packet and byte counters are read under the u64_stats
 * sequence counter so 64-bit values are consistent on 32-bit hosts.
 * tx_dropped is read outside the retry loop - it is bumped with
 * this_cpu_inc() and not protected by the syncp.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;

		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
717
718static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
719{
720 switch (attr_id) {
721 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
722 return true;
723 }
724
725 return false;
726}
727
728static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
729 void *sp)
730{
731 switch (attr_id) {
732 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
733 return mlxsw_sp_port_get_sw_stats64(dev, sp);
734 }
735
736 return -EINVAL;
737}
738
739int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
740 int prio, char *ppcnt_pl)
741{
742 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
743 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
744
745 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
746 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
747}
748
749static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
750 struct rtnl_link_stats64 *stats)
751{
752 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
753 int err;
754
755 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
756 0, ppcnt_pl);
757 if (err)
758 goto out;
759
760 stats->tx_packets =
761 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
762 stats->rx_packets =
763 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
764 stats->tx_bytes =
765 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
766 stats->rx_bytes =
767 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
768 stats->multicast =
769 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
770
771 stats->rx_crc_errors =
772 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
773 stats->rx_frame_errors =
774 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
775
776 stats->rx_length_errors = (
777 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
778 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
779 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
780
781 stats->rx_errors = (stats->rx_crc_errors +
782 stats->rx_frame_errors + stats->rx_length_errors);
783
784out:
785 return err;
786}
787
/* Fill extended per-TC/per-priority statistics from PPCNT.
 * Collection is best-effort: a failed query for one group or traffic
 * class simply leaves the corresponding fields untouched and moves on.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	/* Extended counter group: ECN-marked packets. */
	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	/* Per-TC congestion (WRED) and per-TC queue counters. */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	/* Per-priority Tx counters. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
829
/* Periodic work item that refreshes the cached hardware statistics read
 * back by mlxsw_sp_port_get_stats64(). Always reschedules itself, even
 * when the refresh is skipped.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Carrier is down - skip the register reads and just
		 * reschedule; the cache keeps its last values.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
851
852
853
854
855static void
856mlxsw_sp_port_get_stats64(struct net_device *dev,
857 struct rtnl_link_stats64 *stats)
858{
859 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
860
861 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
862}
863
864static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
865 u16 vid_begin, u16 vid_end,
866 bool is_member, bool untagged)
867{
868 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
869 char *spvm_pl;
870 int err;
871
872 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
873 if (!spvm_pl)
874 return -ENOMEM;
875
876 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
877 vid_end, is_member, untagged);
878 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
879 kfree(spvm_pl);
880 return err;
881}
882
883int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
884 u16 vid_end, bool is_member, bool untagged)
885{
886 u16 vid, vid_e;
887 int err;
888
889 for (vid = vid_begin; vid <= vid_end;
890 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
891 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
892 vid_end);
893
894 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
895 is_member, untagged);
896 if (err)
897 return err;
898 }
899
900 return 0;
901}
902
903static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
904 bool flush_default)
905{
906 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
907
908 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
909 &mlxsw_sp_port->vlans_list, list) {
910 if (!flush_default &&
911 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
912 continue;
913 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
914 }
915}
916
917static void
918mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
919{
920 if (mlxsw_sp_port_vlan->bridge_port)
921 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
922 else if (mlxsw_sp_port_vlan->fid)
923 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
924}
925
/* Create a {port, VID} entry: program VLAN membership in hardware, then
 * allocate the tracking structure and link it on the port's VLAN list.
 * The default VID egresses untagged; all others tagged.
 * Returns the new entry, ERR_PTR(-EEXIST) if the VID already exists on
 * the port, or another ERR_PTR on failure (hardware state rolled back).
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the hardware VLAN membership programmed above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
957
/* Destroy a {port, VID} entry. Order matters: detach it from its bridge
 * port / router user first, unlink and free the tracking structure, and
 * only then remove the VLAN membership from hardware.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
968
/* ndo_vlan_rx_add_vid: create a {port, VID} for the 8021q upper. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VID 0 is added by the 8021q module when the device goes up, but
	 * it is reserved here, so simply report success.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}
982
/* ndo_vlan_rx_kill_vid: destroy the {port, VID}, if one exists. */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VID 0 is removed by the 8021q module when the device goes down,
	 * but it is reserved here (never created), so simply report
	 * success.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}
1002
1003static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1004 struct flow_block_offload *f)
1005{
1006 switch (f->binder_type) {
1007 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
1008 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
1009 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
1010 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
1011 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
1012 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
1013 default:
1014 return -EOPNOTSUPP;
1015 }
1016}
1017
1018static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1019 void *type_data)
1020{
1021 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1022
1023 switch (type) {
1024 case TC_SETUP_BLOCK:
1025 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1026 case TC_SETUP_QDISC_RED:
1027 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1028 case TC_SETUP_QDISC_PRIO:
1029 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1030 case TC_SETUP_QDISC_ETS:
1031 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1032 case TC_SETUP_QDISC_TBF:
1033 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1034 case TC_SETUP_QDISC_FIFO:
1035 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
1036 default:
1037 return -EOPNOTSUPP;
1038 }
1039}
1040
1041static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1042{
1043 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1044
1045 if (!enable) {
1046 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1047 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1048 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1049 return -EINVAL;
1050 }
1051 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1052 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1053 } else {
1054 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1055 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1056 }
1057 return 0;
1058}
1059
/* Toggle physical-layer loopback (PPLR register). If the netdev is
 * running, the port is taken admin-down around the register write and
 * brought back up afterwards, regardless of the write's outcome.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1078
1079typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1080
1081static int mlxsw_sp_handle_feature(struct net_device *dev,
1082 netdev_features_t wanted_features,
1083 netdev_features_t feature,
1084 mlxsw_sp_feature_handler feature_handler)
1085{
1086 netdev_features_t changes = wanted_features ^ dev->features;
1087 bool enable = !!(wanted_features & feature);
1088 int err;
1089
1090 if (!(changes & feature))
1091 return 0;
1092
1093 err = feature_handler(dev, enable);
1094 if (err) {
1095 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1096 enable ? "Enable" : "Disable", &feature, err);
1097 return err;
1098 }
1099
1100 if (enable)
1101 dev->features |= feature;
1102 else
1103 dev->features &= ~feature;
1104
1105 return 0;
1106}
/* ndo_set_features: apply each supported feature bit independently.
 * Errors are OR-accumulated on purpose - every handler runs even if an
 * earlier one failed - and any failure restores the pre-call feature set
 * and reports -EINVAL (the individual errno values are not propagated).
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
1125
1126static struct devlink_port *
1127mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1128{
1129 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1130 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1131
1132 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1133 mlxsw_sp_port->local_port);
1134}
1135
1136static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1137 struct ifreq *ifr)
1138{
1139 struct hwtstamp_config config;
1140 int err;
1141
1142 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1143 return -EFAULT;
1144
1145 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1146 &config);
1147 if (err)
1148 return err;
1149
1150 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1151 return -EFAULT;
1152
1153 return 0;
1154}
1155
1156static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1157 struct ifreq *ifr)
1158{
1159 struct hwtstamp_config config;
1160 int err;
1161
1162 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1163 &config);
1164 if (err)
1165 return err;
1166
1167 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1168 return -EFAULT;
1169
1170 return 0;
1171}
1172
1173static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
1174{
1175 struct hwtstamp_config config = {0};
1176
1177 mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
1178}
1179
1180static int
1181mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1182{
1183 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1184
1185 switch (cmd) {
1186 case SIOCSHWTSTAMP:
1187 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1188 case SIOCGHWTSTAMP:
1189 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1190 default:
1191 return -EOPNOTSUPP;
1192 }
1193}
1194
/* Netdevice operations shared by all Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};
1212
1213static int
1214mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
1215{
1216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1217 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
1218 const struct mlxsw_sp_port_type_speed_ops *ops;
1219 char ptys_pl[MLXSW_REG_PTYS_LEN];
1220 u32 eth_proto_cap_masked;
1221 int err;
1222
1223 ops = mlxsw_sp->port_type_speed_ops;
1224
1225
1226
1227
1228 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1229 0, false);
1230 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1231 if (err)
1232 return err;
1233
1234 ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap,
1235 ð_proto_admin, ð_proto_oper);
1236 eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
1237 ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1238 eth_proto_cap_masked,
1239 mlxsw_sp_port->link.autoneg);
1240 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1241}
1242
1243int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1244{
1245 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1246 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1247 char ptys_pl[MLXSW_REG_PTYS_LEN];
1248 u32 eth_proto_oper;
1249 int err;
1250
1251 port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1252 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1253 mlxsw_sp_port->local_port, 0,
1254 false);
1255 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1256 if (err)
1257 return err;
1258 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1259 ð_proto_oper);
1260 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1261 return 0;
1262}
1263
1264int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1265 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1266 bool dwrr, u8 dwrr_weight)
1267{
1268 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1269 char qeec_pl[MLXSW_REG_QEEC_LEN];
1270
1271 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1272 next_index);
1273 mlxsw_reg_qeec_de_set(qeec_pl, true);
1274 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1275 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1276 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1277}
1278
1279int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1280 enum mlxsw_reg_qeec_hr hr, u8 index,
1281 u8 next_index, u32 maxrate, u8 burst_size)
1282{
1283 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1284 char qeec_pl[MLXSW_REG_QEEC_LEN];
1285
1286 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1287 next_index);
1288 mlxsw_reg_qeec_mase_set(qeec_pl, true);
1289 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1290 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1291 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1292}
1293
1294static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1295 enum mlxsw_reg_qeec_hr hr, u8 index,
1296 u8 next_index, u32 minrate)
1297{
1298 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1299 char qeec_pl[MLXSW_REG_QEEC_LEN];
1300
1301 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1302 next_index);
1303 mlxsw_reg_qeec_mise_set(qeec_pl, true);
1304 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1305
1306 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1307}
1308
1309int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1310 u8 switch_prio, u8 tclass)
1311{
1312 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1313 char qtct_pl[MLXSW_REG_QTCT_LEN];
1314
1315 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1316 tclass);
1317 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1318}
1319
/* Initialize the port's ETS/QEEC scheduling hierarchy to defaults:
 * build the group/subgroup/TC elements, disable all maximum shapers,
 * set the minimum shaper on the upper TC elements, and map every
 * switch priority to traffic class 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the scheduling hierarchy elements, top (group) first. */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TC elements i + 8 are attached under subgroup i with
		 * DWRR enabled.  NOTE(review): presumably these carry
		 * multicast traffic (see mlxsw_sp_port_tc_mc_mode_set) —
		 * confirm against the QEEC documentation.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Disable the maximum shaper on every element of the hierarchy
	 * (port, subgroups and both TC ranges).
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the minimum shaper on the upper TC elements only. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map every switch priority to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1405
1406static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1407 bool enable)
1408{
1409 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1410 char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1411
1412 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1413 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1414}
1415
1416static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1417{
1418 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1419 u8 module = mlxsw_sp_port->mapping.module;
1420 u64 overheat_counter;
1421 int err;
1422
1423 err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
1424 &overheat_counter);
1425 if (err)
1426 return err;
1427
1428 mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1429 return 0;
1430}
1431
1432int
1433mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1434 bool is_8021ad_tagged,
1435 bool is_8021q_tagged)
1436{
1437 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1438 char spvc_pl[MLXSW_REG_SPVC_LEN];
1439
1440 mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1441 is_8021ad_tagged, is_8021q_tagged);
1442 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1443}
1444
/* Create and register one front-panel port: allocate its netdevice and
 * program all per-port state (module mapping, SWID, MAC, speeds, MTU,
 * buffers, ETS, DCB, FIDs, qdiscs, VLANs, NVE, PTP shaper work), then
 * register the netdev. The error path unwinds in exact reverse order.
 * A non-zero split_base_local_port marks the port as a split port.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	/* Only a multi-lane port that is not itself the product of a
	 * split can be split further.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	/* Periodic HW statistics refresh; scheduled at the end of this
	 * function once the port is fully set up.
	 */
	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Reserve headroom for the mlxsw Tx header prepended to every
	 * transmitted packet.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must have been initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Start with the VLAN filter rejecting all VIDs. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* By default classify only 802.1q-tagged packets as tagged
	 * (is_8021ad_tagged = false, is_8021q_tagged = true).
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

	/* Error unwinding mirrors the setup sequence above, newest first. */
err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}
1726
/* Tear down one front-panel port; the sequence is the exact reverse of
 * mlxsw_sp_port_create().
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop deferred work before unregistering the netdevice. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev);
	/* Restore classification of both 802.1ad and 802.1q tags. */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLANs must have been flushed by now. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
1752
1753static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1754{
1755 struct mlxsw_sp_port *mlxsw_sp_port;
1756 int err;
1757
1758 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1759 if (!mlxsw_sp_port)
1760 return -ENOMEM;
1761
1762 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1763 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1764
1765 err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1766 mlxsw_sp_port,
1767 mlxsw_sp->base_mac,
1768 sizeof(mlxsw_sp->base_mac));
1769 if (err) {
1770 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1771 goto err_core_cpu_port_init;
1772 }
1773
1774 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1775 return 0;
1776
1777err_core_cpu_port_init:
1778 kfree(mlxsw_sp_port);
1779 return err;
1780}
1781
1782static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1783{
1784 struct mlxsw_sp_port *mlxsw_sp_port =
1785 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1786
1787 mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1788 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1789 kfree(mlxsw_sp_port);
1790}
1791
1792static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1793{
1794 return mlxsw_sp->ports[local_port] != NULL;
1795}
1796
1797static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1798{
1799 int i;
1800
1801 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1802 if (mlxsw_sp_port_created(mlxsw_sp, i))
1803 mlxsw_sp_port_remove(mlxsw_sp, i);
1804 mlxsw_sp_cpu_port_remove(mlxsw_sp);
1805 kfree(mlxsw_sp->ports);
1806 mlxsw_sp->ports = NULL;
1807}
1808
1809static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1810{
1811 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1812 struct mlxsw_sp_port_mapping *port_mapping;
1813 size_t alloc_size;
1814 int i;
1815 int err;
1816
1817 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
1818 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1819 if (!mlxsw_sp->ports)
1820 return -ENOMEM;
1821
1822 err = mlxsw_sp_cpu_port_create(mlxsw_sp);
1823 if (err)
1824 goto err_cpu_port_create;
1825
1826 for (i = 1; i < max_ports; i++) {
1827 port_mapping = mlxsw_sp->port_mapping[i];
1828 if (!port_mapping)
1829 continue;
1830 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
1831 if (err)
1832 goto err_port_create;
1833 }
1834 return 0;
1835
1836err_port_create:
1837 for (i--; i >= 1; i--)
1838 if (mlxsw_sp_port_created(mlxsw_sp, i))
1839 mlxsw_sp_port_remove(mlxsw_sp, i);
1840 mlxsw_sp_cpu_port_remove(mlxsw_sp);
1841err_cpu_port_create:
1842 kfree(mlxsw_sp->ports);
1843 mlxsw_sp->ports = NULL;
1844 return err;
1845}
1846
/* Build the per-port module mapping cache (mlxsw_sp->port_mapping).
 * Entries are allocated only for ports that report a non-zero width.
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		/* NOTE(review): XM ports are skipped here — based on the
		 * helper name only; confirm their handling elsewhere.
		 */
		if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
			continue;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if (err)
			goto err_port_module_info_get;
		/* Skip ports without a usable width (no mapping). */
		if (!port_mapping.width)
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	/* Free the entries duplicated so far, then the array itself. */
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}
1887
1888static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
1889{
1890 int i;
1891
1892 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1893 kfree(mlxsw_sp->port_mapping[i]);
1894 kfree(mlxsw_sp->port_mapping);
1895}
1896
1897static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
1898{
1899 u8 offset = (local_port - 1) % max_width;
1900
1901 return local_port - offset;
1902}
1903
1904static int
1905mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1906 struct mlxsw_sp_port_mapping *port_mapping,
1907 unsigned int count, u8 offset)
1908{
1909 struct mlxsw_sp_port_mapping split_port_mapping;
1910 int err, i;
1911
1912 split_port_mapping = *port_mapping;
1913 split_port_mapping.width /= count;
1914 for (i = 0; i < count; i++) {
1915 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
1916 base_port, &split_port_mapping);
1917 if (err)
1918 goto err_port_create;
1919 split_port_mapping.lane += split_port_mapping.width;
1920 }
1921
1922 return 0;
1923
1924err_port_create:
1925 for (i--; i >= 0; i--)
1926 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
1927 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
1928 return err;
1929}
1930
1931static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1932 u8 base_port,
1933 unsigned int count, u8 offset)
1934{
1935 struct mlxsw_sp_port_mapping *port_mapping;
1936 int i;
1937
1938
1939 for (i = 0; i < count * offset; i++) {
1940 port_mapping = mlxsw_sp->port_mapping[base_port + i];
1941 if (!port_mapping)
1942 continue;
1943 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
1944 }
1945}
1946
1947static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
1948 unsigned int count,
1949 unsigned int max_width)
1950{
1951 enum mlxsw_res_id local_ports_in_x_res_id;
1952 int split_width = max_width / count;
1953
1954 if (split_width == 1)
1955 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
1956 else if (split_width == 2)
1957 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
1958 else if (split_width == 4)
1959 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
1960 else
1961 return -EINVAL;
1962
1963 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
1964 return -EINVAL;
1965 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
1966}
1967
1968static struct mlxsw_sp_port *
1969mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1970{
1971 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
1972 return mlxsw_sp->ports[local_port];
1973 return NULL;
1974}
1975
/* devlink port split handler: replace one full-width port with 'count'
 * narrower ports sharing its lanes. Returns 0 or a negative errno with
 * an extack message describing the failure.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Only a port currently using all of its module's lanes can be
	 * split.
	 */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* For a maximal split the whole cluster of local ports is
	 * consumed, so start from the cluster's base port; otherwise
	 * split in place starting at this port.
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	/* Every local port the split will occupy must be free, except
	 * the slots held by the existing port(s) being replaced (index 0
	 * and, for a maximal split, the cluster's midpoint).
	 */
	for (i = 0; i < count * offset; i++) {
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before the port (and its struct) goes away. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restore of the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}
2059
/* devlink port unsplit handler: remove all split ports derived from
 * this port's base and recreate the original unsplit port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Number of split siblings, derived from the current width. */
	count = max_width / mlxsw_sp_port->mapping.width;

	/* The split succeeded earlier with these parameters, so a
	 * failure here indicates a driver bug.
	 */
	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}
2112
2113static void
2114mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2115{
2116 int i;
2117
2118 for (i = 0; i < TC_MAX_QUEUE; i++)
2119 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2120}
2121
2122static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2123 char *pude_pl, void *priv)
2124{
2125 struct mlxsw_sp *mlxsw_sp = priv;
2126 struct mlxsw_sp_port *mlxsw_sp_port;
2127 enum mlxsw_reg_pude_oper_status status;
2128 unsigned int max_ports;
2129 u8 local_port;
2130
2131 max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2132 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2133
2134 if (WARN_ON_ONCE(local_port >= max_ports))
2135 return;
2136 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2137 if (!mlxsw_sp_port)
2138 return;
2139
2140 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2141 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2142 netdev_info(mlxsw_sp_port->dev, "link up\n");
2143 netif_carrier_on(mlxsw_sp_port->dev);
2144 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2145 } else {
2146 netdev_info(mlxsw_sp_port->dev, "link down\n");
2147 netif_carrier_off(mlxsw_sp_port->dev);
2148 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2149 }
2150}
2151
2152static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2153 char *mtpptr_pl, bool ingress)
2154{
2155 u8 local_port;
2156 u8 num_rec;
2157 int i;
2158
2159 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2160 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2161 for (i = 0; i < num_rec; i++) {
2162 u8 domain_number;
2163 u8 message_type;
2164 u16 sequence_id;
2165 u64 timestamp;
2166
2167 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2168 &domain_number, &sequence_id,
2169 ×tamp);
2170 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2171 message_type, domain_number,
2172 sequence_id, timestamp);
2173 }
2174}
2175
2176static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2177 char *mtpptr_pl, void *priv)
2178{
2179 struct mlxsw_sp *mlxsw_sp = priv;
2180
2181 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2182}
2183
2184static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2185 char *mtpptr_pl, void *priv)
2186{
2187 struct mlxsw_sp *mlxsw_sp = priv;
2188
2189 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2190}
2191
2192void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2193 u8 local_port, void *priv)
2194{
2195 struct mlxsw_sp *mlxsw_sp = priv;
2196 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2197 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2198
2199 if (unlikely(!mlxsw_sp_port)) {
2200 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2201 local_port);
2202 return;
2203 }
2204
2205 skb->dev = mlxsw_sp_port->dev;
2206
2207 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2208 u64_stats_update_begin(&pcpu_stats->syncp);
2209 pcpu_stats->rx_packets++;
2210 pcpu_stats->rx_bytes += skb->len;
2211 u64_stats_update_end(&pcpu_stats->syncp);
2212
2213 skb->protocol = eth_type_trans(skb, skb->dev);
2214 netif_receive_skb(skb);
2215}
2216
2217static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2218 void *priv)
2219{
2220 skb->offload_fwd_mark = 1;
2221 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2222}
2223
2224static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2225 u8 local_port, void *priv)
2226{
2227 skb->offload_l3_fwd_mark = 1;
2228 skb->offload_fwd_mark = 1;
2229 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2230}
2231
/* Dispatch a trapped PTP packet to the ASIC-generation specific receive
 * handler (Spectrum-1 and Spectrum-2+ timestamping differ).
 */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u8 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2237
/* RX listener without any offload mark set on received skbs. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that sets offload_fwd_mark (packet was L2-forwarded in HW). */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		_is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener that also sets offload_l3_fwd_mark (packet was routed in HW). */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener registered in the driver-wide SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2252
/* Traps and events common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast router traps; ACL2 carries the L3 mark as well. */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2278
/* Spectrum-1 only: PTP hardware timestamps are delivered via FIFO events. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2284
/* Program rate-limiting policers for the CPU trap groups this file owns.
 * Policer index i is used one-to-one as the policer for trap group i;
 * groups not listed in the switch are left untouched (other policers are
 * managed elsewhere).
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		/* is_bytes == false: the rate below is packets per interval,
		 * not bytes.
		 */
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;	/* burst is 2^burst_size per QPCR */
			break;
		default:
			continue;
		}

		/* Record that this policer index is in use so the trap
		 * subsystem does not hand it out again.
		 */
		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2325
/* Bind the trap groups this file owns to their CPU traffic class,
 * priority and policer. By convention the policer id equals the trap
 * group id (see mlxsw_sp_cpu_policers_set()); the event group is
 * deliberately left unpolicered.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			/* Events are not rate limited. */
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			/* Groups configured elsewhere are skipped. */
			continue;
		}

		/* Sanity: a real policer id must exist on the device. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2372
2373static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2374 const struct mlxsw_listener listeners[],
2375 size_t listeners_count)
2376{
2377 int i;
2378 int err;
2379
2380 for (i = 0; i < listeners_count; i++) {
2381 err = mlxsw_core_trap_register(mlxsw_sp->core,
2382 &listeners[i],
2383 mlxsw_sp);
2384 if (err)
2385 goto err_listener_register;
2386
2387 }
2388 return 0;
2389
2390err_listener_register:
2391 for (i--; i >= 0; i--) {
2392 mlxsw_core_trap_unregister(mlxsw_sp->core,
2393 &listeners[i],
2394 mlxsw_sp);
2395 }
2396 return err;
2397}
2398
2399static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2400 const struct mlxsw_listener listeners[],
2401 size_t listeners_count)
2402{
2403 int i;
2404
2405 for (i = 0; i < listeners_count; i++) {
2406 mlxsw_core_trap_unregister(mlxsw_sp->core,
2407 &listeners[i],
2408 mlxsw_sp);
2409 }
2410}
2411
/* Set up the trap subsystem: allocate the policer usage bitmap, program
 * CPU policers and trap groups, then register the shared and the
 * generation-specific listener arrays. Error paths unwind in reverse.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* The trap struct ends in a flexible bitmap sized by the number of
	 * policers the device exposes.
	 */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Listeners specific to this ASIC generation (may be empty). */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2457
2458static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2459{
2460 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
2461 mlxsw_sp->listeners_count);
2462 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
2463 ARRAY_SIZE(mlxsw_sp_listener));
2464 kfree(mlxsw_sp->trap);
2465}
2466
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

/* Configure the LAG hash: enable hashing over the L2/L3/L4 fields below
 * with a per-device seed derived from the base MAC, then allocate the
 * per-LAG bookkeeping array.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	/* Seed from the base MAC so different switches spread flows
	 * differently across LAG members.
	 */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
2502
/* Release the per-LAG bookkeeping array allocated in mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2507
2508static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2509{
2510 char htgt_pl[MLXSW_REG_HTGT_LEN];
2511 int err;
2512
2513 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2514 MLXSW_REG_HTGT_INVALID_POLICER,
2515 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2516 MLXSW_REG_HTGT_DEFAULT_TC);
2517 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2518 if (err)
2519 return err;
2520
2521 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
2522 MLXSW_REG_HTGT_INVALID_POLICER,
2523 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2524 MLXSW_REG_HTGT_DEFAULT_TC);
2525 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2526 if (err)
2527 return err;
2528
2529 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
2530 MLXSW_REG_HTGT_INVALID_POLICER,
2531 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2532 MLXSW_REG_HTGT_DEFAULT_TC);
2533 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2534 if (err)
2535 return err;
2536
2537 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
2538 MLXSW_REG_HTGT_INVALID_POLICER,
2539 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2540 MLXSW_REG_HTGT_DEFAULT_TC);
2541 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2542}
2543
/* PTP operations for Spectrum-1 ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};
2559
/* PTP operations for Spectrum-2 and later ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};
2575
/* A refcounted packet-sampling trigger stored in sample_trigger_ht,
 * keyed by the trigger itself and carrying the sampling parameters.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;		/* hash key */
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;		/* for kfree_rcu() on removal */
	refcount_t refcount;		/* number of users of this trigger */
};

/* The whole trigger struct is the key, hashed as raw bytes (key_len),
 * hence the memset() in mlxsw_sp_sample_trigger_key_init().
 */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2590
/* Build a canonical lookup key from a trigger. The memset() is required:
 * the key is hashed as raw bytes, so any padding and unused members must
 * be zeroed for equal triggers to hash identically.
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
2599
2600
2601struct mlxsw_sp_sample_params *
2602mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2603 const struct mlxsw_sp_sample_trigger *trigger)
2604{
2605 struct mlxsw_sp_sample_trigger_node *trigger_node;
2606 struct mlxsw_sp_sample_trigger key;
2607
2608 mlxsw_sp_sample_trigger_key_init(&key, trigger);
2609 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2610 mlxsw_sp_sample_trigger_ht_params);
2611 if (!trigger_node)
2612 return NULL;
2613
2614 return &trigger_node->params;
2615}
2616
2617static int
2618mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2619 const struct mlxsw_sp_sample_trigger *trigger,
2620 const struct mlxsw_sp_sample_params *params)
2621{
2622 struct mlxsw_sp_sample_trigger_node *trigger_node;
2623 int err;
2624
2625 trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2626 if (!trigger_node)
2627 return -ENOMEM;
2628
2629 trigger_node->trigger = *trigger;
2630 trigger_node->params = *params;
2631 refcount_set(&trigger_node->refcount, 1);
2632
2633 err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2634 &trigger_node->ht_node,
2635 mlxsw_sp_sample_trigger_ht_params);
2636 if (err)
2637 goto err_rhashtable_insert;
2638
2639 return 0;
2640
2641err_rhashtable_insert:
2642 kfree(trigger_node);
2643 return err;
2644}
2645
/* Remove a trigger node from the hashtable and free it after a grace
 * period; kfree_rcu() protects concurrent lockless readers that may
 * still hold a pointer returned by rhashtable_lookup().
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
2655
/* Register sampling parameters for a trigger, or take another reference
 * on an existing, compatible registration.
 *
 * A trigger may be shared only if it is not port-bound and the new
 * parameters exactly match the existing ones; otherwise -EINVAL is
 * returned with an extack message. Returns 0 on success.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* A port-bound trigger cannot be shared. */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	/* Sharing requires identical sampling parameters. */
	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2693
2694void
2695mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2696 const struct mlxsw_sp_sample_trigger *trigger)
2697{
2698 struct mlxsw_sp_sample_trigger_node *trigger_node;
2699 struct mlxsw_sp_sample_trigger key;
2700
2701 ASSERT_RTNL();
2702
2703 mlxsw_sp_sample_trigger_key_init(&key, trigger);
2704
2705 trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2706 &key,
2707 mlxsw_sp_sample_trigger_ht_params);
2708 if (!trigger_node)
2709 return;
2710
2711 if (!refcount_dec_and_test(&trigger_node->refcount))
2712 return;
2713
2714 mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
2715}
2716
2717static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2718 unsigned long event, void *ptr);
2719
/* Common driver initialization shared by all Spectrum generations. The
 * per-generation init functions set the ops pointers first and then call
 * this. Sub-systems are brought up in strict dependency order and torn
 * down in exact reverse order on failure (goto ladder below).
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* NOTE(review): SPAN is deliberately initialized before switchdev
	 * and the router below — later sub-systems appear to depend on it;
	 * confirm exact rationale before reordering.
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* The netdevice notifier is registered only after the router and
	 * SPAN are up, since the event handler uses structures they own.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	/* Ports are created last so that every sub-system they use is
	 * already functional.
	 */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
2931
/* Spectrum-1: install generation-specific ops and extra PTP listeners,
 * then run the common initialization.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2962
/* Spectrum-2: install generation-specific ops (no extra listeners),
 * then run the common initialization.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
2991
/* Spectrum-3: mostly reuses Spectrum-2 ops, with SP3-specific shared
 * buffer ops, SPAN ops and shaper burst size.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3020
/* Common teardown: releases every sub-system brought up by
 * mlxsw_sp_init(), in exact reverse order of initialization.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP state exists only when a clock was created (see init). */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
3050
3051
3052
3053
/* Flood table sized for all 802.1D FIDs plus the full VLAN id space. */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)

/* Device configuration profile applied to Spectrum-1 at init time;
 * notably uses explicit KVD partition sizing (single/double hash split).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3080
/* Device configuration profile for Spectrum-2 and later; unlike SP1 it
 * uses the KVH XLT cache mode instead of explicit KVD partitioning.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvh_xlt_cache_mode	= 1,
	.kvh_xlt_cache_mode		= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3102
/* Prepare devlink size parameters (min/max/granularity, in entries) for
 * the KVD resource and its three children. Each child's maximum is the
 * total KVD size minus the minimum sizes of the other two partitions.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* The top-level KVD size is fixed (min == max == device size). */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
3138
/* Register the Spectrum-1 KVD memory with devlink as a resource tree:
 * KVD at the top, with linear, hash-double and hash-single children.
 * Default sizes come from the SP1 config profile: the linear part is
 * fixed, and the remainder is split between double and single hash by
 * the profile's 41/59 ratio.
 *
 * Returns 0 on success or a negative errno. Partially registered
 * resources are not unwound here; devlink cleanup handles that.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* Split what remains after the linear part using the profile's
	 * double/single ratio, rounded down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}
3204
3205static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3206{
3207 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3208 struct devlink_resource_size_params kvd_size_params;
3209 u32 kvd_size;
3210
3211 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3212 return -EIO;
3213
3214 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3215 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3216 MLXSW_SP_KVD_GRANULARITY,
3217 DEVLINK_RESOURCE_UNIT_ENTRY);
3218
3219 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3220 kvd_size, MLXSW_SP_RESOURCE_KVD,
3221 DEVLINK_RESOURCE_ID_PARENT_TOP,
3222 &kvd_size_params);
3223}
3224
3225static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3226{
3227 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3228 struct devlink_resource_size_params span_size_params;
3229 u32 max_span;
3230
3231 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3232 return -EIO;
3233
3234 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3235 devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3236 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3237
3238 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3239 max_span, MLXSW_SP_RESOURCE_SPAN,
3240 DEVLINK_RESOURCE_ID_PARENT_TOP,
3241 &span_size_params);
3242}
3243
3244static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3245{
3246 int err;
3247
3248 err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3249 if (err)
3250 return err;
3251
3252 err = mlxsw_sp_resources_span_register(mlxsw_core);
3253 if (err)
3254 goto err_resources_span_register;
3255
3256 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3257 if (err)
3258 goto err_resources_counter_register;
3259
3260 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3261 if (err)
3262 goto err_resources_counter_register;
3263
3264 return 0;
3265
3266err_resources_counter_register:
3267err_resources_span_register:
3268 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3269 return err;
3270}
3271
3272static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3273{
3274 int err;
3275
3276 err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3277 if (err)
3278 return err;
3279
3280 err = mlxsw_sp_resources_span_register(mlxsw_core);
3281 if (err)
3282 goto err_resources_span_register;
3283
3284 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3285 if (err)
3286 goto err_resources_counter_register;
3287
3288 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3289 if (err)
3290 goto err_resources_counter_register;
3291
3292 return 0;
3293
3294err_resources_counter_register:
3295err_resources_span_register:
3296 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3297 return err;
3298}
3299
/* Determine the KVD partition sizes (linear, hash-double, hash-single) to
 * program into the device. User-configured sizes obtained via devlink take
 * precedence; otherwise sizes are derived from the config profile.
 * Returns 0 on success, -EIO if required resources are missing or the
 * resulting sizes violate the device-reported minima.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD without the linear part.
	 * It is split into the single and double sizes according to the
	 * parts ratio from the profile, rounded down to the KVD granularity.
	 * In case the user provided the sizes, they are obtained via devlink
	 * and the profile-based fallbacks below are skipped.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check the results are legal with respect to the device minima. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3354
3355static int
3356mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3357 struct devlink_param_gset_ctx *ctx)
3358{
3359 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3360 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3361
3362 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3363 return 0;
3364}
3365
3366static int
3367mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3368 struct devlink_param_gset_ctx *ctx)
3369{
3370 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3371 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3372
3373 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3374}
3375
/* Devlink runtime parameters used by Spectrum-2 and later ASICs.
 * "acl_region_rehash_interval" is wired to the rehash interval get/set
 * handlers defined above.
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};
3385
3386static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3387{
3388 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3389 union devlink_param_value value;
3390 int err;
3391
3392 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
3393 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3394 if (err)
3395 return err;
3396
3397 value.vu32 = 0;
3398 devlink_param_driverinit_value_set(devlink,
3399 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3400 value);
3401 return 0;
3402}
3403
3404static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3405{
3406 devlink_params_unregister(priv_to_devlink(mlxsw_core),
3407 mlxsw_sp2_devlink_params,
3408 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3409}
3410
/* Tx-completion hook for PTP-timestamped skbs: strip the mlxsw Tx header
 * and hand the skb to the per-ASIC PTP implementation.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3419
/* mlxsw driver description for Spectrum-1: binds the SP1-specific init,
 * firmware requirements, resource registration and config profile to the
 * shared Spectrum operation callbacks.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp1_fw_rev,
	.fw_filename = MLXSW_SP1_FW_FILENAME,
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.res_query_enabled = true,
	.fw_fatal_enabled = true,
	.temp_warn_enabled = true,
};
3459
/* mlxsw driver description for Spectrum-2. Unlike SP1, it registers
 * devlink params (ACL region rehash interval) and uses the flat SP2 KVD
 * resource registration; there is no kvd_sizes_get callback.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp2_fw_rev,
	.fw_filename = MLXSW_SP2_FW_FILENAME,
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
	.fw_fatal_enabled = true,
	.temp_warn_enabled = true,
};
3500
/* mlxsw driver description for Spectrum-3. It shares the Spectrum-2
 * resource/params registration and config profile, differing only in
 * its init callback and firmware requirements.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp3_fw_rev,
	.fw_filename = MLXSW_SP3_FW_FILENAME,
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
	.fw_fatal_enabled = true,
	.temp_warn_enabled = true,
};
3541
3542bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3543{
3544 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3545}
3546
3547static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
3548 struct netdev_nested_priv *priv)
3549{
3550 int ret = 0;
3551
3552 if (mlxsw_sp_port_dev_check(lower_dev)) {
3553 priv->data = (void *)netdev_priv(lower_dev);
3554 ret = 1;
3555 }
3556
3557 return ret;
3558}
3559
3560struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3561{
3562 struct netdev_nested_priv priv = {
3563 .data = NULL,
3564 };
3565
3566 if (mlxsw_sp_port_dev_check(dev))
3567 return netdev_priv(dev);
3568
3569 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
3570
3571 return (struct mlxsw_sp_port *)priv.data;
3572}
3573
3574struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3575{
3576 struct mlxsw_sp_port *mlxsw_sp_port;
3577
3578 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3579 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3580}
3581
3582struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3583{
3584 struct netdev_nested_priv priv = {
3585 .data = NULL,
3586 };
3587
3588 if (mlxsw_sp_port_dev_check(dev))
3589 return netdev_priv(dev);
3590
3591 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3592 &priv);
3593
3594 return (struct mlxsw_sp_port *)priv.data;
3595}
3596
3597struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3598{
3599 struct mlxsw_sp_port *mlxsw_sp_port;
3600
3601 rcu_read_lock();
3602 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3603 if (mlxsw_sp_port)
3604 dev_hold(mlxsw_sp_port->dev);
3605 rcu_read_unlock();
3606 return mlxsw_sp_port;
3607}
3608
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3613
/* Make @mlxsw_sp_port leave any bridge that the LAG device, or one of the
 * LAG's upper devices (e.g. VLAN devices on top of it), is a port of.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	/* The LAG device itself may be a bridge port. */
	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	/* So may any of the LAG's uppers. */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
3632
3633static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3634{
3635 char sldr_pl[MLXSW_REG_SLDR_LEN];
3636
3637 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3638 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3639}
3640
3641static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3642{
3643 char sldr_pl[MLXSW_REG_SLDR_LEN];
3644
3645 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3646 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3647}
3648
3649static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3650 u16 lag_id, u8 port_index)
3651{
3652 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3653 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3654
3655 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3656 lag_id, port_index);
3657 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3658}
3659
3660static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3661 u16 lag_id)
3662{
3663 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3664 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3665
3666 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3667 lag_id);
3668 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3669}
3670
3671static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3672 u16 lag_id)
3673{
3674 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3675 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3676
3677 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3678 lag_id);
3679 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3680}
3681
3682static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3683 u16 lag_id)
3684{
3685 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3686 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3687
3688 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3689 lag_id);
3690 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3691}
3692
3693static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3694 struct net_device *lag_dev,
3695 u16 *p_lag_id)
3696{
3697 struct mlxsw_sp_upper *lag;
3698 int free_lag_id = -1;
3699 u64 max_lag;
3700 int i;
3701
3702 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3703 for (i = 0; i < max_lag; i++) {
3704 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3705 if (lag->ref_count) {
3706 if (lag->dev == lag_dev) {
3707 *p_lag_id = i;
3708 return 0;
3709 }
3710 } else if (free_lag_id < 0) {
3711 free_lag_id = i;
3712 }
3713 }
3714 if (free_lag_id < 0)
3715 return -EBUSY;
3716 *p_lag_id = free_lag_id;
3717 return 0;
3718}
3719
3720static bool
3721mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3722 struct net_device *lag_dev,
3723 struct netdev_lag_upper_info *lag_upper_info,
3724 struct netlink_ext_ack *extack)
3725{
3726 u16 lag_id;
3727
3728 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3729 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3730 return false;
3731 }
3732 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3733 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
3734 return false;
3735 }
3736 return true;
3737}
3738
3739static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3740 u16 lag_id, u8 *p_port_index)
3741{
3742 u64 max_lag_members;
3743 int i;
3744
3745 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3746 MAX_LAG_MEMBERS);
3747 for (i = 0; i < max_lag_members; i++) {
3748 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3749 *p_port_index = i;
3750 return 0;
3751 }
3752 }
3753 return -EBUSY;
3754}
3755
/* Enslave a port to a LAG device: allocate (or reuse) a device LAG slot,
 * map the port into the LAG's collector, record the mapping in the core
 * and migrate the port's default-VLAN router configuration to the LAG.
 * On failure everything is unwound in reverse order.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First port in this LAG - create it in the device. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface on its own. */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if one exists. */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	/* Destroy the LAG if it was created above and never referenced. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
3813
/* Release a port from its LAG: undo the collector mapping, flush VLAN
 * state that was tied to the LAG, leave any bridges reached through the
 * LAG and destroy the device LAG when the last member leaves.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid. */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);

	/* Make the LAG and its directly linked uppers leave the bridges
	 * they are members of.
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress. */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
3848
3849static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3850 u16 lag_id)
3851{
3852 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3853 char sldr_pl[MLXSW_REG_SLDR_LEN];
3854
3855 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3856 mlxsw_sp_port->local_port);
3857 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3858}
3859
3860static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3861 u16 lag_id)
3862{
3863 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3864 char sldr_pl[MLXSW_REG_SLDR_LEN];
3865
3866 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3867 mlxsw_sp_port->local_port);
3868 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3869}
3870
3871static int
3872mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
3873{
3874 int err;
3875
3876 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
3877 mlxsw_sp_port->lag_id);
3878 if (err)
3879 return err;
3880
3881 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3882 if (err)
3883 goto err_dist_port_add;
3884
3885 return 0;
3886
3887err_dist_port_add:
3888 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3889 return err;
3890}
3891
3892static int
3893mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
3894{
3895 int err;
3896
3897 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3898 mlxsw_sp_port->lag_id);
3899 if (err)
3900 return err;
3901
3902 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
3903 mlxsw_sp_port->lag_id);
3904 if (err)
3905 goto err_col_port_disable;
3906
3907 return 0;
3908
3909err_col_port_disable:
3910 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
3911 return err;
3912}
3913
3914static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3915 struct netdev_lag_lower_state_info *info)
3916{
3917 if (info->tx_enabled)
3918 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
3919 else
3920 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
3921}
3922
3923static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
3924 bool enable)
3925{
3926 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3927 enum mlxsw_reg_spms_state spms_state;
3928 char *spms_pl;
3929 u16 vid;
3930 int err;
3931
3932 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
3933 MLXSW_REG_SPMS_STATE_DISCARDING;
3934
3935 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
3936 if (!spms_pl)
3937 return -ENOMEM;
3938 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
3939
3940 for (vid = 0; vid < VLAN_N_VID; vid++)
3941 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
3942
3943 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
3944 kfree(spms_pl);
3945 return err;
3946}
3947
/* Prepare a port for enslavement to an OVS master: switch it to virtual
 * port mode, set STP forwarding on all VLANs, enable all VLANs and turn
 * learning off on each of them. Each step is rolled back on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	/* Disable learning on every VLAN. */
	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VIDs already processed; vid is u16 so
	 * the loop stops once it reaches 0.
	 */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
3982
3983static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3984{
3985 u16 vid;
3986
3987 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
3988 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
3989 vid, true);
3990
3991 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
3992 false, false);
3993 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
3994 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
3995}
3996
3997static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
3998{
3999 unsigned int num_vxlans = 0;
4000 struct net_device *dev;
4001 struct list_head *iter;
4002
4003 netdev_for_each_lower_dev(br_dev, dev, iter) {
4004 if (netif_is_vxlan(dev))
4005 num_vxlans++;
4006 }
4007
4008 return num_vxlans > 1;
4009}
4010
4011static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4012{
4013 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4014 struct net_device *dev;
4015 struct list_head *iter;
4016
4017 netdev_for_each_lower_dev(br_dev, dev, iter) {
4018 u16 pvid;
4019 int err;
4020
4021 if (!netif_is_vxlan(dev))
4022 continue;
4023
4024 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4025 if (err || !pvid)
4026 continue;
4027
4028 if (test_and_set_bit(pvid, vlans))
4029 return false;
4030 }
4031
4032 return true;
4033}
4034
4035static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4036 struct netlink_ext_ack *extack)
4037{
4038 if (br_multicast_enabled(br_dev)) {
4039 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4040 return false;
4041 }
4042
4043 if (!br_vlan_enabled(br_dev) &&
4044 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4045 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4046 return false;
4047 }
4048
4049 if (br_vlan_enabled(br_dev) &&
4050 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4051 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4052 return false;
4053 }
4054
4055 return true;
4056}
4057
/* Handle (PRE)CHANGEUPPER notifications for a Spectrum port netdev.
 * On PRECHANGEUPPER, validate that the requested topology can be
 * offloaded and veto it otherwise (the reason is reported via extack);
 * on CHANGEUPPER, reflect the new topology (bridge / LAG / OVS
 * enslavement or release) into the device.
 * @lower_dev: the netdev the notification chain was invoked for.
 * @dev: the Spectrum port netdev.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers are
		 * supported on top of a port.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* A bridge with VxLAN lowers must pass the VxLAN topology
		 * checks before the port may join it.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		/* Only 802.1Q and 802.1AD bridges are offloadable, and VLAN
		 * uppers are not allowed together with an 802.1AD bridge.
		 */
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* A VLAN upper being unlinked while it is a bridge
			 * port requires leaving that bridge in the device.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4202
4203static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4204 unsigned long event, void *ptr)
4205{
4206 struct netdev_notifier_changelowerstate_info *info;
4207 struct mlxsw_sp_port *mlxsw_sp_port;
4208 int err;
4209
4210 mlxsw_sp_port = netdev_priv(dev);
4211 info = ptr;
4212
4213 switch (event) {
4214 case NETDEV_CHANGELOWERSTATE:
4215 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4216 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4217 info->lower_state_info);
4218 if (err)
4219 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4220 }
4221 break;
4222 }
4223
4224 return 0;
4225}
4226
4227static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4228 struct net_device *port_dev,
4229 unsigned long event, void *ptr)
4230{
4231 switch (event) {
4232 case NETDEV_PRECHANGEUPPER:
4233 case NETDEV_CHANGEUPPER:
4234 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4235 event, ptr);
4236 case NETDEV_CHANGELOWERSTATE:
4237 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4238 ptr);
4239 }
4240
4241 return 0;
4242}
4243
4244static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4245 unsigned long event, void *ptr)
4246{
4247 struct net_device *dev;
4248 struct list_head *iter;
4249 int ret;
4250
4251 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4252 if (mlxsw_sp_port_dev_check(dev)) {
4253 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4254 ptr);
4255 if (ret)
4256 return ret;
4257 }
4258 }
4259
4260 return 0;
4261}
4262
/* Handle (PRE)CHANGEUPPER notifications for a VLAN device whose real device
 * is a front-panel port (@dev). Only bridge and macvlan uppers are accepted;
 * anything else is vetoed at PRECHANGEUPPER time.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking is always allowed; the checks below only gate
		 * new enslavements.
		 */
		if (!info->linking)
			break;
		/* A not-yet-offloaded bridge containing a VxLAN device must
		 * have a valid VxLAN configuration before enslavement.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Enslaving to a device that already has uppers is only
		 * supported when that upper is an already-offloaded bridge.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		/* A macvlan upper requires a router interface (RIF) on the
		 * VLAN device.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER vetoes all other upper types;
			 * reaching this branch indicates a bug.
			 */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4329
4330static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4331 struct net_device *lag_dev,
4332 unsigned long event,
4333 void *ptr, u16 vid)
4334{
4335 struct net_device *dev;
4336 struct list_head *iter;
4337 int ret;
4338
4339 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4340 if (mlxsw_sp_port_dev_check(dev)) {
4341 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4342 event, ptr,
4343 vid);
4344 if (ret)
4345 return ret;
4346 }
4347 }
4348
4349 return 0;
4350}
4351
4352static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4353 struct net_device *br_dev,
4354 unsigned long event, void *ptr,
4355 u16 vid)
4356{
4357 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4358 struct netdev_notifier_changeupper_info *info = ptr;
4359 struct netlink_ext_ack *extack;
4360 struct net_device *upper_dev;
4361
4362 if (!mlxsw_sp)
4363 return 0;
4364
4365 extack = netdev_notifier_info_to_extack(&info->info);
4366
4367 switch (event) {
4368 case NETDEV_PRECHANGEUPPER:
4369 upper_dev = info->upper_dev;
4370 if (!netif_is_macvlan(upper_dev)) {
4371 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4372 return -EOPNOTSUPP;
4373 }
4374 if (!info->linking)
4375 break;
4376 if (netif_is_macvlan(upper_dev) &&
4377 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4378 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4379 return -EOPNOTSUPP;
4380 }
4381 break;
4382 case NETDEV_CHANGEUPPER:
4383 upper_dev = info->upper_dev;
4384 if (info->linking)
4385 break;
4386 if (netif_is_macvlan(upper_dev))
4387 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4388 break;
4389 }
4390
4391 return 0;
4392}
4393
4394static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4395 unsigned long event, void *ptr)
4396{
4397 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4398 u16 vid = vlan_dev_vlan_id(vlan_dev);
4399
4400 if (mlxsw_sp_port_dev_check(real_dev))
4401 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4402 event, ptr, vid);
4403 else if (netif_is_lag_master(real_dev))
4404 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4405 real_dev, event,
4406 ptr, vid);
4407 else if (netif_is_bridge_master(real_dev))
4408 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4409 event, ptr, vid);
4410
4411 return 0;
4412}
4413
/* Handle (PRE)CHANGEUPPER notifications for an offloaded bridge device.
 * Only 802.1q VLAN and macvlan uppers are supported, and no uppers at all
 * on top of an 802.1ad bridge.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	/* Ignore bridges that are not offloaded by this driver instance. */
	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		/* Unlinking is always allowed; the checks below only gate
		 * new enslavements.
		 */
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		/* A macvlan upper requires a router interface (RIF) on the
		 * bridge device.
		 */
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Tear down router state tied to the removed upper. */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
4468
4469static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4470 unsigned long event, void *ptr)
4471{
4472 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4473 struct netdev_notifier_changeupper_info *info = ptr;
4474 struct netlink_ext_ack *extack;
4475
4476 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4477 return 0;
4478
4479 extack = netdev_notifier_info_to_extack(&info->info);
4480
4481
4482 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4483
4484 return -EOPNOTSUPP;
4485}
4486
4487static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4488{
4489 struct netdev_notifier_changeupper_info *info = ptr;
4490
4491 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4492 return false;
4493 return netif_is_l3_master(info->upper_dev);
4494}
4495
/* Handle netdev events for a VxLAN device that is (or is becoming) a port
 * of an offloaded bridge: join or leave the bridge's VxLAN offload state.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges not offloaded by this driver instance. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* A device that is down is joined later, when it is
			 * brought up (see NETDEV_PRE_UP below).
			 */
			if (!netif_running(dev))
				return 0;
			/* Only VLAN-unaware bridges are joined here, with a
			 * dummy VID of 0. NOTE(review): VLAN-aware bridges
			 * are presumably joined from the VLAN configuration
			 * path with a concrete VID — confirm.
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* Mirror image of the join above: VLAN-aware bridges
			 * are handled elsewhere.
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Deferred join for a VxLAN device that was enslaved while
		 * down; only relevant when a bridge master exists.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
4563
/* Main netdev notifier callback: route each event to the handler matching
 * the device type (VxLAN, IP-in-IP, port, LAG, VLAN, bridge, macvlan) or
 * to the router code for address/MTU changes and VRF enslavements.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* Invalidate any mirroring (SPAN) session whose target device is
	 * going away.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	/* Re-resolve mirroring sessions; the topology may have changed. */
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	/* NOTE(review): the ladder below is a separate if/else-if chain, so
	 * an error from the VxLAN handler above could be overwritten if the
	 * same device also matched one of the conditions below — confirm
	 * this is intended.
	 */
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4607
/* IPv4/IPv6 address validator notifiers; registered in
 * mlxsw_sp_module_init() (handlers are defined elsewhere in the driver).
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
4615
/* PCI device ID tables and PCI driver glue for the Spectrum-1/2/3 ASICs;
 * registered in mlxsw_sp_module_init().
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};
4645
/* Module entry point: register the address-validator notifiers, then the
 * three per-ASIC core drivers, then their PCI shims. On any failure,
 * everything already registered is unwound in strict reverse order via the
 * goto ladder below.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

	/* Each label undoes the registrations that succeeded before the
	 * failing step, newest first.
	 */
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
4694
/* Module exit: tear everything down in exact reverse order of
 * mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
4706
/* Module metadata: entry/exit points, license, PCI hotplug tables and the
 * firmware images requested at probe time.
 */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
4719