1
2
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/types.h>
7#include <linux/pci.h>
8#include <linux/netdevice.h>
9#include <linux/etherdevice.h>
10#include <linux/ethtool.h>
11#include <linux/slab.h>
12#include <linux/device.h>
13#include <linux/skbuff.h>
14#include <linux/if_vlan.h>
15#include <linux/if_bridge.h>
16#include <linux/workqueue.h>
17#include <linux/jiffies.h>
18#include <linux/bitops.h>
19#include <linux/list.h>
20#include <linux/notifier.h>
21#include <linux/dcbnl.h>
22#include <linux/inetdevice.h>
23#include <linux/netlink.h>
24#include <linux/jhash.h>
25#include <linux/log2.h>
26#include <net/switchdev.h>
27#include <net/pkt_cls.h>
28#include <net/netevent.h>
29#include <net/addrconf.h>
30
31#include "spectrum.h"
32#include "pci.h"
33#include "core.h"
34#include "core_env.h"
35#include "reg.h"
36#include "port.h"
37#include "trap.h"
38#include "txheader.h"
39#include "spectrum_cnt.h"
40#include "spectrum_dpipe.h"
41#include "spectrum_acl_flex_actions.h"
42#include "spectrum_span.h"
43#include "spectrum_ptp.h"
44#include "spectrum_trap.h"
45#include "../mlxfw/mlxfw.h"
46
/* Minimum firmware version required per ASIC generation. If the running
 * firmware is older, the driver flashes the matching .mfa2 file (see
 * mlxsw_sp_fw_rev_validate()).
 */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2007
#define MLXSW_SP1_FWREV_SUBMINOR 1168
/* Oldest SP1 minor revision that tolerates a reset-based FW activation. */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware image file name, built from the required revision above. */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2007
#define MLXSW_SP2_FWREV_SUBMINOR 1168

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2007
#define MLXSW_SP3_FWREV_SUBMINOR 1168

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

/* Masks of the base-MAC bits that are fixed per switch; the remaining low
 * bits are available for per-port address derivation.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
104
105
106
107
108
/* tx_hdr_version
 * Tx header version; the driver sets MLXSW_TXHDR_VERSION_1 (see
 * mlxsw_sp_txhdr_construct()).
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type; set to MLXSW_TXHDR_ETH_CTL by this driver.
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type; set to MLXSW_TXHDR_PROTO_ETH by this driver.
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Whether the packet was routed. Not set by this driver — presumably only
 * meaningful for data packets; confirm against the PRM.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Whether the 'fid' field below is valid. Not set by this driver.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID; always 0 here.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * When set, the packet uses the control traffic class; always 1 here.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress traffic class. Not set by this driver.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port (this driver always passes tx_info->local_port);
 * NOTE(review): the "mid" in the name suggests it doubles as a multicast ID
 * for multicast packets — confirm against the PRM.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding identifier, used only when fid_valid is set. Not set by this
 * driver.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * Packet type; set to MLXSW_TXHDR_TYPE_CONTROL by this driver.
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
172
/* Glue between the common mlxfw flashing code and a Spectrum instance.
 * The embedded mlxfw_dev is handed to mlxfw; callbacks map it back to the
 * owning mlxsw_sp via container_of().
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};
177
178static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
179 u16 component_index, u32 *p_max_size,
180 u8 *p_align_bits, u16 *p_max_write_size)
181{
182 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
183 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
184 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
185 char mcqi_pl[MLXSW_REG_MCQI_LEN];
186 int err;
187
188 mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
189 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
190 if (err)
191 return err;
192 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
193 p_max_write_size);
194
195 *p_align_bits = max_t(u8, *p_align_bits, 2);
196 *p_max_write_size = min_t(u16, *p_max_write_size,
197 MLXSW_REG_MCDA_MAX_DATA_LEN);
198 return 0;
199}
200
201static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
202{
203 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
204 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
205 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
206 char mcc_pl[MLXSW_REG_MCC_LEN];
207 u8 control_state;
208 int err;
209
210 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
211 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
212 if (err)
213 return err;
214
215 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
216 if (control_state != MLXFW_FSM_STATE_IDLE)
217 return -EBUSY;
218
219 mlxsw_reg_mcc_pack(mcc_pl,
220 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
221 0, *fwhandle, 0);
222 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
223}
224
225static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
226 u32 fwhandle, u16 component_index,
227 u32 component_size)
228{
229 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
230 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
231 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
232 char mcc_pl[MLXSW_REG_MCC_LEN];
233
234 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
235 component_index, fwhandle, component_size);
236 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
237}
238
239static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
240 u32 fwhandle, u8 *data, u16 size,
241 u32 offset)
242{
243 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
244 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
245 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
246 char mcda_pl[MLXSW_REG_MCDA_LEN];
247
248 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
249 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
250}
251
252static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
253 u32 fwhandle, u16 component_index)
254{
255 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
256 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
257 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
258 char mcc_pl[MLXSW_REG_MCC_LEN];
259
260 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
261 component_index, fwhandle, 0);
262 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
263}
264
265static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
266{
267 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
268 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
269 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
270 char mcc_pl[MLXSW_REG_MCC_LEN];
271
272 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
273 fwhandle, 0);
274 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
275}
276
277static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
278 enum mlxfw_fsm_state *fsm_state,
279 enum mlxfw_fsm_state_err *fsm_state_err)
280{
281 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
282 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
283 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
284 char mcc_pl[MLXSW_REG_MCC_LEN];
285 u8 control_state;
286 u8 error_code;
287 int err;
288
289 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
290 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
291 if (err)
292 return err;
293
294 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
295 *fsm_state = control_state;
296 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
297 MLXFW_FSM_STATE_ERR_MAX);
298 return 0;
299}
300
301static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
302{
303 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
304 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
305 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
306 char mcc_pl[MLXSW_REG_MCC_LEN];
307
308 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
309 fwhandle, 0);
310 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
311}
312
313static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
314{
315 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
316 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
317 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
318 char mcc_pl[MLXSW_REG_MCC_LEN];
319
320 mlxsw_reg_mcc_pack(mcc_pl,
321 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
322 fwhandle, 0);
323 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
324}
325
/* Callback table wiring the generic mlxfw flashing flow to the MCQI/MCC/
 * MCDA register implementations above.
 */
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release,
};
337
/* Flash @firmware onto the device using the common mlxfw flow.
 * The core is notified around the operation (fw_flash_start/end) so it can
 * quiesce accesses while the flash is in progress.
 */
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
			.devlink = priv_to_devlink(mlxsw_sp->core),
		},
		.mlxsw_sp = mlxsw_sp
	};
	int err;

	mlxsw_core_fw_flash_start(mlxsw_sp->core);
	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
				   firmware, extack);
	mlxsw_core_fw_flash_end(mlxsw_sp->core);

	return err;
}
360
/* Validate that the running firmware is compatible with the driver and, if
 * it is too old, flash the bundled image.
 *
 * Returns 0 when nothing needs doing, -EAGAIN when flashing succeeded and
 * the caller should reset the device to activate it, or a negative errno.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check FW revision if driver variant did not request one. */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash'. */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Major version must match exactly; minor/subminor only need to be
	 * new enough (checked by the core helper below).
	 */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
		return 0;

	dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
		rev->major, rev->minor, rev->subminor, req_rev->major,
		req_rev->minor, req_rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* If the running firmware can survive a reset-based activation,
	 * request one via -EAGAIN; otherwise continue with the old firmware.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}
419
/* devlink flash handler: flash the image in @file_name. Per-component
 * updates are not supported, so a non-NULL @component is rejected.
 */
static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
				 const char *file_name, const char *component,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	const struct firmware *firmware;
	int err;

	if (component)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, file_name,
				      mlxsw_sp->bus_info->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
	release_firmware(firmware);

	return err;
}
440
441int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
442 unsigned int counter_index, u64 *packets,
443 u64 *bytes)
444{
445 char mgpc_pl[MLXSW_REG_MGPC_LEN];
446 int err;
447
448 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
449 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
450 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
451 if (err)
452 return err;
453 if (packets)
454 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
455 if (bytes)
456 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
457 return 0;
458}
459
460static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
461 unsigned int counter_index)
462{
463 char mgpc_pl[MLXSW_REG_MGPC_LEN];
464
465 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
466 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
467 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
468}
469
470int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
471 unsigned int *p_counter_index)
472{
473 int err;
474
475 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
476 p_counter_index);
477 if (err)
478 return err;
479 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
480 if (err)
481 goto err_counter_clear;
482 return 0;
483
484err_counter_clear:
485 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
486 *p_counter_index);
487 return err;
488}
489
/* Return a flow counter previously taken by mlxsw_sp_flow_counter_alloc()
 * to the flow sub-pool.
 */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
496
/* Prepend and fill the mlxsw TX header on @skb. All packets sent through
 * this path are marked as control packets destined to tx_info->local_port;
 * the caller must have reserved MLXSW_TXHDR_LEN of headroom (see
 * skb_cow_head() in mlxsw_sp_port_xmit()).
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Zero first: fields not set below (rx_is_router, fid, etclass, …)
	 * must read as 0.
	 */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
512
513enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
514{
515 switch (state) {
516 case BR_STATE_FORWARDING:
517 return MLXSW_REG_SPMS_STATE_FORWARDING;
518 case BR_STATE_LEARNING:
519 return MLXSW_REG_SPMS_STATE_LEARNING;
520 case BR_STATE_LISTENING:
521 case BR_STATE_DISABLED:
522 case BR_STATE_BLOCKING:
523 return MLXSW_REG_SPMS_STATE_DISCARDING;
524 default:
525 BUG();
526 }
527}
528
529int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
530 u8 state)
531{
532 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
533 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
534 char *spms_pl;
535 int err;
536
537 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
538 if (!spms_pl)
539 return -ENOMEM;
540 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
541 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
542
543 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
544 kfree(spms_pl);
545 return err;
546}
547
548static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
549{
550 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
551 int err;
552
553 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
554 if (err)
555 return err;
556 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
557 return 0;
558}
559
560int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
561 bool is_up)
562{
563 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
564 char paos_pl[MLXSW_REG_PAOS_LEN];
565
566 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
567 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
568 MLXSW_PORT_ADMIN_STATUS_DOWN);
569 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
570}
571
572static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
573 unsigned char *addr)
574{
575 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
576 char ppad_pl[MLXSW_REG_PPAD_LEN];
577
578 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
579 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
580 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
581}
582
/* Derive the netdev MAC from the switch base MAC by adding the local port
 * number to the last byte, then program it into hardware.
 * NOTE(review): the += can wrap the last byte for large port numbers;
 * presumably the base MAC reserves enough low bits for all ports (see
 * mlxsw_sp*_mac_mask above) — confirm.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
592
/* Program the port MTU via PMTU, after checking it against the maximum the
 * hardware reports for this port.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	/* The hardware MTU must also cover the mlxsw TX header and the
	 * Ethernet header, which are not part of the netdev MTU.
	 */
	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	/* Packing with MTU 0 queries the port's maximum supported MTU. */
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
613
614static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
615{
616 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
617 char pspa_pl[MLXSW_REG_PSPA_LEN];
618
619 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
620 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
621}
622
623int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
624{
625 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
626 char svpe_pl[MLXSW_REG_SVPE_LEN];
627
628 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
629 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
630}
631
632int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
633 bool learn_enable)
634{
635 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
636 char *spvmlr_pl;
637 int err;
638
639 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
640 if (!spvmlr_pl)
641 return -ENOMEM;
642 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
643 learn_enable);
644 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
645 kfree(spvmlr_pl);
646 return err;
647}
648
649static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
650 u16 vid)
651{
652 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
653 char spvid_pl[MLXSW_REG_SPVID_LEN];
654
655 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
656 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
657}
658
659static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
660 bool allow)
661{
662 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
663 char spaft_pl[MLXSW_REG_SPAFT_LEN];
664
665 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
666 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
667}
668
669int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
670{
671 int err;
672
673 if (!vid) {
674 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
675 if (err)
676 return err;
677 } else {
678 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
679 if (err)
680 return err;
681 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
682 if (err)
683 goto err_port_allow_untagged_set;
684 }
685
686 mlxsw_sp_port->pvid = vid;
687 return 0;
688
689err_port_allow_untagged_set:
690 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
691 return err;
692}
693
694static int
695mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
696{
697 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
698 char sspr_pl[MLXSW_REG_SSPR_LEN];
699
700 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
701 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
702}
703
/* Read the module/lane mapping of @local_port from PMLP into @port_mapping,
 * rejecting configurations the driver cannot handle: a non-power-of-2 width,
 * lanes spread over multiple modules, split RX/TX lanes, or non-sequential
 * lane numbering.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	/* All lanes are validated against the first lane's module below. */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	/* width == 0 (unmapped port) is accepted; the loop below is a no-op
	 * in that case.
	 */
	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
754
755static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
756{
757 struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
758 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
759 char pmlp_pl[MLXSW_REG_PMLP_LEN];
760 int i;
761
762 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
763 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
764 for (i = 0; i < port_mapping->width; i++) {
765 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
766 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i);
767 }
768
769 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
770}
771
772static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
773{
774 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
775 char pmlp_pl[MLXSW_REG_PMLP_LEN];
776
777 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
778 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
779 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
780}
781
/* ndo_open: administratively enable the port, then allow the stack to
 * queue packets. Ordering matters — TX must not start on a down port.
 */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}
793
/* ndo_stop: stop the TX queue first, then administratively disable the
 * port (mirror of mlxsw_sp_port_open()).
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
801
/* ndo_start_xmit: prepend the mlxsw TX header and hand the skb to the core
 * for transmission through the CPU port. Always returns NETDEV_TX_OK
 * except when the core TX path is momentarily busy.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	/* Make room for the TX header; drop the packet if we cannot. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	/* NETDEV_TX_BUSY: the skb is handed back to the stack untouched. */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb itself on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* Account the length before transmit: the skb may be freed by the
	 * core. The TX header is consumed by the device, so exclude it.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		/* The core did not consume the skb; free it here. */
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
854
/* ndo_set_rx_mode: intentionally empty — RX filtering is handled by the
 * switch hardware, not by this CPU-port netdev.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
858
859static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
860{
861 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
862 struct sockaddr *addr = p;
863 int err;
864
865 if (!is_valid_ether_addr(addr->sa_data))
866 return -EADDRNOTAVAIL;
867
868 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
869 if (err)
870 return err;
871 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
872 return 0;
873}
874
/* Xoff threshold for a priority-group buffer: two MTUs worth of cells. */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}
880
/* Safety factor applied to the user-provided PFC delay allowance. */
#define MLXSW_SP_CELL_FACTOR 2

/* Convert the PFC delay allowance to cells of headroom.
 * @delay arrives in bits (hence the BITS_PER_BYTE division); the result is
 * doubled and padded with one MTU — presumably to absorb a frame already
 * in flight when the pause takes effect; confirm against the spec.
 */
static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}
891
892
893
894
/* Fixed delay allowance, in bytes, used for 802.3x PAUSE (as opposed to
 * per-priority PFC). NOTE(review): derivation of 58752 is not visible here
 * — presumably worst-case cable length and MTU; confirm.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

/* Headroom delay in cells for one priority group: PFC wins over PAUSE;
 * with neither enabled no delay headroom is reserved.
 */
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}
907
908static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
909 bool lossy)
910{
911 if (lossy)
912 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
913 else
914 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
915 thres);
916}
917
/* Configure the port's headroom buffers (PBMC) for the given MTU,
 * priority-to-TC mapping and pause/PFC settings. A PG buffer is lossless if
 * either global pause or PFC applies to it; its size is the Xoff threshold
 * plus a delay allowance. Fails with -ENOBUFS if the total exceeds the
 * port's maximum headroom.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i, j, err;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);

	/* Query first so buffers we do not touch keep their configuration. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		u16 total_cells;
		bool lossy;

		/* Only configure PGs that some priority actually maps to;
		 * the PG is PFC-enabled if any mapped priority is.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
		total_cells = thres_cells + delay_cells;

		taken_headroom_cells += total_cells;
		if (taken_headroom_cells > max_headroom_cells)
			return -ENOBUFS;

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
974
975int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
976 int mtu, bool pause_en)
977{
978 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
979 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
980 struct ieee_pfc *my_pfc;
981 u8 *prio_tc;
982
983 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
984 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
985
986 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
987 pause_en, my_pfc);
988}
989
/* ndo_change_mtu: resize the headroom first (it must fit the new MTU),
 * update SPAN buffers, then program the port MTU. dev->mtu still holds the
 * old value until success, which the unwind path relies on.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Restore using the old MTU, still in dev->mtu. */
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1014
/* Sum the per-CPU software counters (traffic that passed through the CPU
 * port netdev) into @stats. Assumes @stats comes in zeroed by the caller.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* u64_stats retry loop: re-read if a writer updated the
		 * counters concurrently (needed on 32-bit).
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;

		/* tx_dropped is updated with this_cpu_inc(), outside the
		 * syncp section, so it is read outside the retry loop too.
		 */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
1046
1047static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1048{
1049 switch (attr_id) {
1050 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1051 return true;
1052 }
1053
1054 return false;
1055}
1056
1057static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1058 void *sp)
1059{
1060 switch (attr_id) {
1061 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1062 return mlxsw_sp_port_get_sw_stats64(dev, sp);
1063 }
1064
1065 return -EINVAL;
1066}
1067
/* Query the PPCNT register for counter group @grp and priority/TC
 * @prio of @dev's port. The raw register payload is returned in
 * @ppcnt_pl, which must be at least MLXSW_REG_PPCNT_LEN bytes.
 * Returns 0 on success or a negative errno from the register query.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
1077
1078static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1079 struct rtnl_link_stats64 *stats)
1080{
1081 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1082 int err;
1083
1084 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1085 0, ppcnt_pl);
1086 if (err)
1087 goto out;
1088
1089 stats->tx_packets =
1090 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1091 stats->rx_packets =
1092 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1093 stats->tx_bytes =
1094 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1095 stats->rx_bytes =
1096 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1097 stats->multicast =
1098 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1099
1100 stats->rx_crc_errors =
1101 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1102 stats->rx_frame_errors =
1103 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1104
1105 stats->rx_length_errors = (
1106 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1107 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1108 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1109
1110 stats->rx_errors = (stats->rx_crc_errors +
1111 stats->rx_frame_errors + stats->rx_length_errors);
1112
1113out:
1114 return err;
1115}
1116
/* Populate the extended hardware statistics in @xstats: ECN marks,
 * per-TC WRED drops, queue backlog and tail drops, and per-priority
 * Tx counters. Each PPCNT query failure is tolerated individually;
 * the corresponding fields simply keep their previous values.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	/* Per traffic class: congestion (WRED) and TC counter groups. */
	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	/* Per-priority Tx frame and octet counters. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
1158
/* Periodic worker refreshing the cached hardware statistics of a
 * port. The cache is what mlxsw_sp_port_get_stats64() serves, so the
 * work re-arms itself unconditionally.
 */
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Skip querying the device while the carrier is down,
		 * but keep the work scheduled so the cache resumes
		 * updating once the link comes back.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
1180
1181
1182
1183
1184static void
1185mlxsw_sp_port_get_stats64(struct net_device *dev,
1186 struct rtnl_link_stats64 *stats)
1187{
1188 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1189
1190 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1191}
1192
1193static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1194 u16 vid_begin, u16 vid_end,
1195 bool is_member, bool untagged)
1196{
1197 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1198 char *spvm_pl;
1199 int err;
1200
1201 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1202 if (!spvm_pl)
1203 return -ENOMEM;
1204
1205 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1206 vid_end, is_member, untagged);
1207 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1208 kfree(spvm_pl);
1209 return err;
1210}
1211
1212int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1213 u16 vid_end, bool is_member, bool untagged)
1214{
1215 u16 vid, vid_e;
1216 int err;
1217
1218 for (vid = vid_begin; vid <= vid_end;
1219 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1220 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1221 vid_end);
1222
1223 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1224 is_member, untagged);
1225 if (err)
1226 return err;
1227 }
1228
1229 return 0;
1230}
1231
1232static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1233 bool flush_default)
1234{
1235 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1236
1237 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1238 &mlxsw_sp_port->vlans_list, list) {
1239 if (!flush_default &&
1240 mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1241 continue;
1242 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1243 }
1244}
1245
1246static void
1247mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1248{
1249 if (mlxsw_sp_port_vlan->bridge_port)
1250 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1251 else if (mlxsw_sp_port_vlan->fid)
1252 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1253}
1254
/* Create a {port, VLAN} entry for @vid: program the VLAN into the
 * device first, then allocate and link the tracking structure.
 * Returns the new entry, ERR_PTR(-EEXIST) if @vid already exists on
 * the port, or another ERR_PTR on failure. The default VID is
 * programmed as egress-untagged.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	/* Program the device before allocating, so the error path only
	 * has the hardware state to roll back.
	 */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the hardware programming done above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1286
/* Destroy a {port, VLAN}: detach it from any bridge/router upper,
 * unlink and free it, then remove the VLAN from the device. The port
 * pointer and VID are cached up front because the structure is freed
 * before the final hardware update.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1297
/* .ndo_vlan_rx_add_vid callback: create a {port, VLAN} for @vid. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to the HW filter when the device goes up, but
	 * it is reserved in this driver, so simply report success.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}
1311
/* .ndo_vlan_rx_kill_vid callback: destroy the {port, VLAN} for @vid,
 * if one exists. Always returns 0.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from the HW filter when the device goes
	 * down, but it is reserved in this driver, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}
1331
1332static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1333 struct flow_block_offload *f)
1334{
1335 switch (f->binder_type) {
1336 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
1337 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
1338 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
1339 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
1340 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
1341 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
1342 default:
1343 return -EOPNOTSUPP;
1344 }
1345}
1346
1347static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1348 void *type_data)
1349{
1350 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1351
1352 switch (type) {
1353 case TC_SETUP_BLOCK:
1354 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1355 case TC_SETUP_QDISC_RED:
1356 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1357 case TC_SETUP_QDISC_PRIO:
1358 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1359 case TC_SETUP_QDISC_ETS:
1360 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1361 case TC_SETUP_QDISC_TBF:
1362 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1363 case TC_SETUP_QDISC_FIFO:
1364 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
1365 default:
1366 return -EOPNOTSUPP;
1367 }
1368}
1369
1370static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1371{
1372 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1373
1374 if (!enable) {
1375 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1376 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1377 netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1378 return -EINVAL;
1379 }
1380 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1381 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1382 } else {
1383 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1384 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1385 }
1386 return 0;
1387}
1388
1389static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
1390{
1391 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1392 char pplr_pl[MLXSW_REG_PPLR_LEN];
1393 int err;
1394
1395 if (netif_running(dev))
1396 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1397
1398 mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
1399 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
1400 pplr_pl);
1401
1402 if (netif_running(dev))
1403 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1404
1405 return err;
1406}
1407
1408typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1409
1410static int mlxsw_sp_handle_feature(struct net_device *dev,
1411 netdev_features_t wanted_features,
1412 netdev_features_t feature,
1413 mlxsw_sp_feature_handler feature_handler)
1414{
1415 netdev_features_t changes = wanted_features ^ dev->features;
1416 bool enable = !!(wanted_features & feature);
1417 int err;
1418
1419 if (!(changes & feature))
1420 return 0;
1421
1422 err = feature_handler(dev, enable);
1423 if (err) {
1424 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1425 enable ? "Enable" : "Disable", &feature, err);
1426 return err;
1427 }
1428
1429 if (enable)
1430 dev->features |= feature;
1431 else
1432 dev->features &= ~feature;
1433
1434 return 0;
1435}
1436static int mlxsw_sp_set_features(struct net_device *dev,
1437 netdev_features_t features)
1438{
1439 netdev_features_t oper_features = dev->features;
1440 int err = 0;
1441
1442 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1443 mlxsw_sp_feature_hw_tc);
1444 err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1445 mlxsw_sp_feature_loopback);
1446
1447 if (err) {
1448 dev->features = oper_features;
1449 return -EINVAL;
1450 }
1451
1452 return 0;
1453}
1454
1455static struct devlink_port *
1456mlxsw_sp_port_get_devlink_port(struct net_device *dev)
1457{
1458 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1459 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1460
1461 return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
1462 mlxsw_sp_port->local_port);
1463}
1464
1465static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1466 struct ifreq *ifr)
1467{
1468 struct hwtstamp_config config;
1469 int err;
1470
1471 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1472 return -EFAULT;
1473
1474 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
1475 &config);
1476 if (err)
1477 return err;
1478
1479 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1480 return -EFAULT;
1481
1482 return 0;
1483}
1484
1485static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1486 struct ifreq *ifr)
1487{
1488 struct hwtstamp_config config;
1489 int err;
1490
1491 err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
1492 &config);
1493 if (err)
1494 return err;
1495
1496 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1497 return -EFAULT;
1498
1499 return 0;
1500}
1501
/* Reset the port's hardware timestamping to the all-zero (disabled)
 * configuration; used during port teardown. The return value of
 * hwtstamp_set is intentionally ignored.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1508
1509static int
1510mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1511{
1512 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1513
1514 switch (cmd) {
1515 case SIOCSHWTSTAMP:
1516 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1517 case SIOCGHWTSTAMP:
1518 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1519 default:
1520 return -EOPNOTSUPP;
1521 }
1522}
1523
/* Netdev operations shared by all mlxsw Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};
1541
/* Advertise every speed the device reports as supported: query the
 * port's PTYS capabilities and write the capability mask back as the
 * admin (advertised) mask, honoring the port's autoneg setting.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* First query the supported speeds (capability mask). */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	/* Then advertise all of them. */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1566
1567int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1568{
1569 const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1570 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1571 char ptys_pl[MLXSW_REG_PTYS_LEN];
1572 u32 eth_proto_oper;
1573 int err;
1574
1575 port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1576 port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1577 mlxsw_sp_port->local_port, 0,
1578 false);
1579 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1580 if (err)
1581 return err;
1582 port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1583 ð_proto_oper);
1584 *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1585 return 0;
1586}
1587
1588int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
1589 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
1590 bool dwrr, u8 dwrr_weight)
1591{
1592 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1593 char qeec_pl[MLXSW_REG_QEEC_LEN];
1594
1595 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1596 next_index);
1597 mlxsw_reg_qeec_de_set(qeec_pl, true);
1598 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
1599 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
1600 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1601}
1602
1603int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
1604 enum mlxsw_reg_qeec_hr hr, u8 index,
1605 u8 next_index, u32 maxrate, u8 burst_size)
1606{
1607 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1608 char qeec_pl[MLXSW_REG_QEEC_LEN];
1609
1610 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1611 next_index);
1612 mlxsw_reg_qeec_mase_set(qeec_pl, true);
1613 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
1614 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
1615 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1616}
1617
1618static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
1619 enum mlxsw_reg_qeec_hr hr, u8 index,
1620 u8 next_index, u32 minrate)
1621{
1622 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1623 char qeec_pl[MLXSW_REG_QEEC_LEN];
1624
1625 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
1626 next_index);
1627 mlxsw_reg_qeec_mise_set(qeec_pl, true);
1628 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
1629
1630 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1631}
1632
1633int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1634 u8 switch_prio, u8 tclass)
1635{
1636 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1637 char qtct_pl[MLXSW_REG_QTCT_LEN];
1638
1639 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1640 tclass);
1641 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1642}
1643
/* Initialize the port's ETS element hierarchy and priority mapping to
 * defaults. Returns 0 on success or the first negative errno.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy: a single group feeds one
	 * subgroup per TC, and each subgroup feeds TC i as well as
	 * TC i + 8 (the latter using DWRR with weight 100; presumably
	 * the multicast TCs — mirrors the min-shaper setup below).
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it: the port level, each subgroup and each TC (both
	 * the i and i + 8 elements).
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for the i + 8 TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1729
1730static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
1731 bool enable)
1732{
1733 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1734 char qtctm_pl[MLXSW_REG_QTCTM_LEN];
1735
1736 mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
1737 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
1738}
1739
/* Create and register the netdev for front-panel port @local_port.
 *
 * @split_base_local_port: first local port of the split cluster; zero
 *	means the port is not the product of a split.
 * @port_mapping: module/lane mapping, copied into the port struct.
 *
 * Initializes the port in a fixed order (core port, netdev, module
 * map, SWID, MAC, speeds, MTU, buffers, ETS, TC MC mode, DCB, FIDs,
 * qdiscs, VLAN filter, NVE, PVID, default VLAN, netdev registration)
 * and unwinds in exact reverse order on failure.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	/* A port can be split further only if it occupies more than
	 * one lane and is not itself the result of a split.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Every transmitted packet carries a Tx header (metadata) on
	 * top of all other headers, so reserve room for it.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* NOTE(review): buffers and ETS are set up before DCB —
	 * presumably DCB depends on them; keep this ordering.
	 */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Clear the whole VLAN filter before adding any VLANs. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);
	INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
			  mlxsw_sp_span_speed_update_work);

	/* Publish the port before register_netdev() so callbacks that
	 * may fire immediately can look it up.
	 */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	/* Kick off the periodic HW stats cache refresh immediately. */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}
1983
/* Tear down a front-panel port created by mlxsw_sp_port_create(),
 * releasing its resources in reverse order of creation.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	/* Stop all deferred work before unregistering the netdev. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev);
	mlxsw_sp->ports[local_port] = NULL;
	/* Flush all VLANs, including the default one. */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* By now every {port, VLAN} must have been destroyed. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
2008
2009static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
2010{
2011 struct mlxsw_sp_port *mlxsw_sp_port;
2012 int err;
2013
2014 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
2015 if (!mlxsw_sp_port)
2016 return -ENOMEM;
2017
2018 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2019 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
2020
2021 err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
2022 mlxsw_sp_port,
2023 mlxsw_sp->base_mac,
2024 sizeof(mlxsw_sp->base_mac));
2025 if (err) {
2026 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
2027 goto err_core_cpu_port_init;
2028 }
2029
2030 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
2031 return 0;
2032
2033err_core_cpu_port_init:
2034 kfree(mlxsw_sp_port);
2035 return err;
2036}
2037
2038static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
2039{
2040 struct mlxsw_sp_port *mlxsw_sp_port =
2041 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
2042
2043 mlxsw_core_cpu_port_fini(mlxsw_sp->core);
2044 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
2045 kfree(mlxsw_sp_port);
2046}
2047
2048static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2049{
2050 return mlxsw_sp->ports[local_port] != NULL;
2051}
2052
2053static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2054{
2055 int i;
2056
2057 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2058 if (mlxsw_sp_port_created(mlxsw_sp, i))
2059 mlxsw_sp_port_remove(mlxsw_sp, i);
2060 mlxsw_sp_cpu_port_remove(mlxsw_sp);
2061 kfree(mlxsw_sp->ports);
2062 mlxsw_sp->ports = NULL;
2063}
2064
2065static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2066{
2067 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2068 struct mlxsw_sp_port_mapping *port_mapping;
2069 size_t alloc_size;
2070 int i;
2071 int err;
2072
2073 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2074 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2075 if (!mlxsw_sp->ports)
2076 return -ENOMEM;
2077
2078 err = mlxsw_sp_cpu_port_create(mlxsw_sp);
2079 if (err)
2080 goto err_cpu_port_create;
2081
2082 for (i = 1; i < max_ports; i++) {
2083 port_mapping = mlxsw_sp->port_mapping[i];
2084 if (!port_mapping)
2085 continue;
2086 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
2087 if (err)
2088 goto err_port_create;
2089 }
2090 return 0;
2091
2092err_port_create:
2093 for (i--; i >= 1; i--)
2094 if (mlxsw_sp_port_created(mlxsw_sp, i))
2095 mlxsw_sp_port_remove(mlxsw_sp, i);
2096 mlxsw_sp_cpu_port_remove(mlxsw_sp);
2097err_cpu_port_create:
2098 kfree(mlxsw_sp->ports);
2099 mlxsw_sp->ports = NULL;
2100 return err;
2101}
2102
/* Build the per-port module mapping table: query each local port's
 * module info and keep a heap copy for ports that have a module
 * (width != 0); unused slots stay NULL. On failure, everything
 * allocated so far is freed. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
		if (err)
			goto err_port_module_info_get;
		if (!port_mapping.width)
			/* No module connected to this local port. */
			continue;

		mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
						    sizeof(port_mapping),
						    GFP_KERNEL);
		if (!mlxsw_sp->port_mapping[i]) {
			err = -ENOMEM;
			goto err_port_module_info_dup;
		}
	}
	return 0;

err_port_module_info_get:
err_port_module_info_dup:
	/* Free the copies made so far (slot i itself was not filled). */
	for (i--; i >= 1; i--)
		kfree(mlxsw_sp->port_mapping[i]);
	kfree(mlxsw_sp->port_mapping);
	return err;
}
2140
2141static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
2142{
2143 int i;
2144
2145 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2146 kfree(mlxsw_sp->port_mapping[i]);
2147 kfree(mlxsw_sp->port_mapping);
2148}
2149
2150static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
2151{
2152 u8 offset = (local_port - 1) % max_width;
2153
2154 return local_port - offset;
2155}
2156
2157static int
2158mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2159 struct mlxsw_sp_port_mapping *port_mapping,
2160 unsigned int count, u8 offset)
2161{
2162 struct mlxsw_sp_port_mapping split_port_mapping;
2163 int err, i;
2164
2165 split_port_mapping = *port_mapping;
2166 split_port_mapping.width /= count;
2167 for (i = 0; i < count; i++) {
2168 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
2169 base_port, &split_port_mapping);
2170 if (err)
2171 goto err_port_create;
2172 split_port_mapping.lane += split_port_mapping.width;
2173 }
2174
2175 return 0;
2176
2177err_port_create:
2178 for (i--; i >= 0; i--)
2179 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
2180 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
2181 return err;
2182}
2183
2184static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2185 u8 base_port,
2186 unsigned int count, u8 offset)
2187{
2188 struct mlxsw_sp_port_mapping *port_mapping;
2189 int i;
2190
2191
2192 for (i = 0; i < count * offset; i++) {
2193 port_mapping = mlxsw_sp->port_mapping[base_port + i];
2194 if (!port_mapping)
2195 continue;
2196 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
2197 }
2198}
2199
2200static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
2201 unsigned int count,
2202 unsigned int max_width)
2203{
2204 enum mlxsw_res_id local_ports_in_x_res_id;
2205 int split_width = max_width / count;
2206
2207 if (split_width == 1)
2208 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
2209 else if (split_width == 2)
2210 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
2211 else if (split_width == 4)
2212 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
2213 else
2214 return -EINVAL;
2215
2216 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
2217 return -EINVAL;
2218 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
2219}
2220
2221static struct mlxsw_sp_port *
2222mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2223{
2224 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2225 return mlxsw_sp->ports[local_port];
2226 return NULL;
2227}
2228
/* devlink port-split handler: split @local_port into @count child ports.
 *
 * Validates that the port exists, occupies all lanes of its module, and
 * that the target local-port range is free, then removes the parent
 * port(s) and creates the split children. Returns 0 or a negative errno,
 * with a message in @extack on failure.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int max_width;
	u8 base_port;
	int offset;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Only a port that uses all lanes of its module can be split. */
	if (mlxsw_sp_port->mapping.width != max_width) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
		return -EINVAL;
	}

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	if (offset < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	/* Only for a maximal split may the base port differ from the
	 * requested local port (the cluster's first port is used then).
	 */
	base_port = count == max_width ?
		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
		    local_port;

	for (i = 0; i < count * offset; i++) {
		/* The base port itself is expected to exist, as is the
		 * middle port of the cluster in the maximal-split case;
		 * any other occupied slot makes the split invalid.
		 */
		if (i == 0 || (count == max_width && i == count / 2))
			continue;

		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
			return -EINVAL;
		}
	}

	/* Copy the mapping before removing the port that owns it. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
					 count, offset);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Best-effort restoration of the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
	return err;
}
2312
/* devlink port-unsplit handler: remove the split children of the cluster
 * that @local_port belongs to and re-create the original unsplit port(s).
 * Returns 0 or a negative errno, with a message in @extack on failure.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	unsigned int count;
	int max_width;
	u8 base_port;
	int offset;
	int i;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	max_width = mlxsw_core_module_max_width(mlxsw_core,
						mlxsw_sp_port->mapping.module);
	if (max_width < 0) {
		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
		return max_width;
	}

	/* Recover how many children the module was split into from the
	 * child's current lane width.
	 */
	count = max_width / mlxsw_sp_port->mapping.width;

	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
	/* A split port was created with a valid offset, so failure here
	 * indicates an internal inconsistency.
	 */
	if (WARN_ON(offset < 0)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
		return -EINVAL;
	}

	base_port = mlxsw_sp_port->split_base_local_port;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);

	return 0;
}
2365
2366static void
2367mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2368{
2369 int i;
2370
2371 for (i = 0; i < TC_MAX_QUEUE; i++)
2372 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2373}
2374
2375static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2376 char *pude_pl, void *priv)
2377{
2378 struct mlxsw_sp *mlxsw_sp = priv;
2379 struct mlxsw_sp_port *mlxsw_sp_port;
2380 enum mlxsw_reg_pude_oper_status status;
2381 u8 local_port;
2382
2383 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2384 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2385 if (!mlxsw_sp_port)
2386 return;
2387
2388 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2389 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2390 netdev_info(mlxsw_sp_port->dev, "link up\n");
2391 netif_carrier_on(mlxsw_sp_port->dev);
2392 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2393 mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
2394 } else {
2395 netdev_info(mlxsw_sp_port->dev, "link down\n");
2396 netif_carrier_off(mlxsw_sp_port->dev);
2397 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2398 }
2399}
2400
/* Drain one MTPPTR event: unpack every timestamp record it carries and
 * hand each one to the SP1 PTP core for matching against queued packets.
 * @ingress distinguishes the RX FIFO from the TX FIFO.
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u8 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2424
2425static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2426 char *mtpptr_pl, void *priv)
2427{
2428 struct mlxsw_sp *mlxsw_sp = priv;
2429
2430 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2431}
2432
2433static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2434 char *mtpptr_pl, void *priv)
2435{
2436 struct mlxsw_sp *mlxsw_sp = priv;
2437
2438 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2439}
2440
/* Generic RX listener: account the packet in the per-CPU stats and inject
 * it into the network stack via the port's netdev, without setting any
 * offload forwarding marks.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	/* Counted before eth_type_trans() pulls the Ethernet header. */
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2465
2466static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2467 void *priv)
2468{
2469 skb->offload_fwd_mark = 1;
2470 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2471}
2472
2473static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2474 u8 local_port, void *priv)
2475{
2476 skb->offload_l3_fwd_mark = 1;
2477 skb->offload_fwd_mark = 1;
2478 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2479}
2480
/* Dispatch a received PTP packet to the ASIC-generation-specific handler. */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u8 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2486
/* Deliver a sampled packet to the psample group configured on the port,
 * truncating it if the sampler requests so. The skb is always consumed.
 */
void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			     u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_sample *sample;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}

	/* The sampler configuration is RCU-protected; it may be torn down
	 * concurrently, hence the dereference under rcu_read_lock().
	 */
	rcu_read_lock();
	sample = rcu_dereference(mlxsw_sp_port->sample);
	if (!sample)
		goto out_unlock;
	size = sample->truncate ? sample->trunc_size : skb->len;
	psample_sample_packet(sample->psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0, sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
2512
/* Listener definition helpers. The SP_ prefix is prepended to select a
 * Spectrum trap group; the trailing DISCARD is the listener's disabled
 * action (NOTE(review): per MLXSW_RXL's argument order — confirm).
 */

/* RX listener whose handler sets no offload forwarding marks. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener whose handler marks packets as L2-forwarded in hardware. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX listener whose handler marks packets as L2- and L3-forwarded. */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener bound to the Spectrum event trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2527
/* Listeners common to all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast router traps (delivered via ACL trap IDs) */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2553
/* Listeners specific to Spectrum-1 (SP1 timestamps PTP via FIFO events). */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2559
/* Program the CPU policers (QPCR register) for the trap groups owned by
 * this file and mark their policer IDs as used in the trap state bitmap.
 * Policer IDs not listed here are skipped (presumably configured by other
 * modules — see mlxsw_sp->trap->policers_usage users).
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		/* Policer ID equals the trap group number here; only the
		 * groups this file manages get a rate.
		 */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2600
/* Configure the trap groups (HTGT register) owned by this file: bind each
 * group to its policer, priority and traffic class. Groups not listed are
 * skipped. Returns 0 or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By default the policer ID matches the trap group number,
		 * mirroring mlxsw_sp_cpu_policers_set().
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			/* Events are not policed. */
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2647
2648static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2649 const struct mlxsw_listener listeners[],
2650 size_t listeners_count)
2651{
2652 int i;
2653 int err;
2654
2655 for (i = 0; i < listeners_count; i++) {
2656 err = mlxsw_core_trap_register(mlxsw_sp->core,
2657 &listeners[i],
2658 mlxsw_sp);
2659 if (err)
2660 goto err_listener_register;
2661
2662 }
2663 return 0;
2664
2665err_listener_register:
2666 for (i--; i >= 0; i--) {
2667 mlxsw_core_trap_unregister(mlxsw_sp->core,
2668 &listeners[i],
2669 mlxsw_sp);
2670 }
2671 return err;
2672}
2673
2674static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2675 const struct mlxsw_listener listeners[],
2676 size_t listeners_count)
2677{
2678 int i;
2679
2680 for (i = 0; i < listeners_count; i++) {
2681 mlxsw_core_trap_unregister(mlxsw_sp->core,
2682 &listeners[i],
2683 mlxsw_sp);
2684 }
2685}
2686
/* Initialize trap handling: allocate the trap state (with a policer-usage
 * bitmap sized by the MAX_CPU_POLICERS resource), program CPU policers
 * and trap groups, then register the common listeners followed by the
 * per-generation ones. Returns 0 or a negative errno; on failure all
 * completed steps are undone.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* Trailing flexible bitmap sized for one bit per policer. */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
				      ARRAY_SIZE(mlxsw_sp_listener));
	if (err)
		goto err_traps_register;

	/* Listeners specific to this ASIC generation (may be empty). */
	err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
				      mlxsw_sp->listeners_count);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2732
/* Reverse of mlxsw_sp_traps_init(): unregister the per-generation and
 * common listeners (reverse registration order) and free the trap state.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
				  mlxsw_sp->listeners_count);
	mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
				  ARRAY_SIZE(mlxsw_sp_listener));
	kfree(mlxsw_sp->trap);
}
2741
2742#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2743
2744static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2745{
2746 char slcr_pl[MLXSW_REG_SLCR_LEN];
2747 u32 seed;
2748 int err;
2749
2750 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2751 MLXSW_SP_LAG_SEED_INIT);
2752 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2753 MLXSW_REG_SLCR_LAG_HASH_DMAC |
2754 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2755 MLXSW_REG_SLCR_LAG_HASH_VLANID |
2756 MLXSW_REG_SLCR_LAG_HASH_SIP |
2757 MLXSW_REG_SLCR_LAG_HASH_DIP |
2758 MLXSW_REG_SLCR_LAG_HASH_SPORT |
2759 MLXSW_REG_SLCR_LAG_HASH_DPORT |
2760 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2761 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2762 if (err)
2763 return err;
2764
2765 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
2766 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2767 return -EIO;
2768
2769 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
2770 sizeof(struct mlxsw_sp_upper),
2771 GFP_KERNEL);
2772 if (!mlxsw_sp->lags)
2773 return -ENOMEM;
2774
2775 return 0;
2776}
2777
/* Free the LAG table allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
2782
/* Configure the EMAD trap group with default priority/TC and no policer.
 * NOTE(review): runs before full trap init, presumably so EMAD transactions
 * work during driver bring-up — confirm against the caller in core.
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
2793
/* PTP callbacks used on Spectrum-1 devices. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
};
2809
/* PTP callbacks used on Spectrum-2 and later devices. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
};
2825
2826static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2827 unsigned long event, void *ptr);
2828
/* Common driver initialization shared by all Spectrum generations, run
 * after the per-generation *_init() has wired up the ops pointers.
 * Brings up every subsystem in dependency order; on any failure, all
 * previously-initialized subsystems are torn down in reverse order via
 * the goto chain. Returns 0 or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* NOTE(review): SPAN is deliberately initialized before switchdev
	 * and the router below — confirm the ordering dependency before
	 * moving this call.
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_frc_capable) {
		/* NULL is a valid return value from clock_init; only
		 * IS_ERR() values are treated as failure.
		 */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* PTP state is only needed when a clock exists. */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* NOTE(review): the notifier is registered only after all the
	 * subsystems above are up, presumably because netdev events may
	 * fire immediately — confirm before reordering.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
3035
/* Spectrum-1 init: select the SP1 firmware and SP1-specific ops tables
 * (including the PTP FIFO listeners), then run the common init.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3065
/* Spectrum-2 init: select the SP2 firmware and SP2 ops tables, then run
 * the common init. No extra listeners are needed for this generation.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3093
/* Spectrum-3 init: mostly reuses the SP2 ops tables, with SP3-specific
 * firmware, SPAN ops and lowest shaper burst size, then runs the common
 * init.
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3121
/* Tear down everything set up by the common init path. The calls below are
 * in reverse order of initialization; do not reorder them casually — e.g.
 * ports must be removed before the netdevice notifier is unregistered, and
 * PTP state is only destroyed when a clock was actually created.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP clock is created conditionally; only undo it when present. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
3150
3151
3152
3153
/* Size (in entries) of each FID flood table, sized to cover the 802.1D FIDs
 * plus the VLAN ID space. NOTE(review): confirm the exact "- 1" against the
 * flood-table layout expected by firmware before changing.
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
 VLAN_VID_MASK - 1)
3156
/* Device configuration profile passed to firmware for Spectrum-1. Each
 * "used_*" flag tells firmware that the corresponding field is valid.
 * Unlike SP2, SP1 partitions its KVD memory explicitly (linear/single/
 * double parts), hence the used_kvd_sizes/kvd_* fields here.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3180
/* Device configuration profile for Spectrum-2 and later (also used by the
 * SP3 driver below). Identical to the SP1 profile except that no explicit
 * KVD partitioning is requested — SP2+ manages KVD internally.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid = 1,
	.max_mid = MLXSW_SP_MID_MAX,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_flood_tables = 3,
	.fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3200
3201static void
3202mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
3203 struct devlink_resource_size_params *kvd_size_params,
3204 struct devlink_resource_size_params *linear_size_params,
3205 struct devlink_resource_size_params *hash_double_size_params,
3206 struct devlink_resource_size_params *hash_single_size_params)
3207{
3208 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3209 KVD_SINGLE_MIN_SIZE);
3210 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
3211 KVD_DOUBLE_MIN_SIZE);
3212 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3213 u32 linear_size_min = 0;
3214
3215 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3216 MLXSW_SP_KVD_GRANULARITY,
3217 DEVLINK_RESOURCE_UNIT_ENTRY);
3218 devlink_resource_size_params_init(linear_size_params, linear_size_min,
3219 kvd_size - single_size_min -
3220 double_size_min,
3221 MLXSW_SP_KVD_GRANULARITY,
3222 DEVLINK_RESOURCE_UNIT_ENTRY);
3223 devlink_resource_size_params_init(hash_double_size_params,
3224 double_size_min,
3225 kvd_size - single_size_min -
3226 linear_size_min,
3227 MLXSW_SP_KVD_GRANULARITY,
3228 DEVLINK_RESOURCE_UNIT_ENTRY);
3229 devlink_resource_size_params_init(hash_single_size_params,
3230 single_size_min,
3231 kvd_size - double_size_min -
3232 linear_size_min,
3233 MLXSW_SP_KVD_GRANULARITY,
3234 DEVLINK_RESOURCE_UNIT_ENTRY);
3235}
3236
/* Register the SP1 KVD devlink resource tree: the top-level KVD resource
 * and its linear / hash-double / hash-single children. Default sizes come
 * from the SP1 config profile: the linear part is fixed, and the remainder
 * is split between double and single hash parts by the profile's ratio,
 * rounded down to the KVD granularity.
 *
 * Returns 0 on success or a negative errno. Partially registered resources
 * are left for the caller (mlxsw_sp1_resources_register) to unregister.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	/* Sub-resources of the linear part (e.g. chunks) live elsewhere. */
	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	/* Split what is left of the KVD after the linear part between the
	 * double and single hash parts, per the profile's parts ratio.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}
3302
3303static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3304{
3305 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3306 struct devlink_resource_size_params kvd_size_params;
3307 u32 kvd_size;
3308
3309 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3310 return -EIO;
3311
3312 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3313 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3314 MLXSW_SP_KVD_GRANULARITY,
3315 DEVLINK_RESOURCE_UNIT_ENTRY);
3316
3317 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3318 kvd_size, MLXSW_SP_RESOURCE_KVD,
3319 DEVLINK_RESOURCE_ID_PARENT_TOP,
3320 &kvd_size_params);
3321}
3322
3323static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3324{
3325 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3326 struct devlink_resource_size_params span_size_params;
3327 u32 max_span;
3328
3329 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3330 return -EIO;
3331
3332 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3333 devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3334 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3335
3336 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3337 max_span, MLXSW_SP_RESOURCE_SPAN,
3338 DEVLINK_RESOURCE_ID_PARENT_TOP,
3339 &span_size_params);
3340}
3341
3342static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3343{
3344 int err;
3345
3346 err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3347 if (err)
3348 return err;
3349
3350 err = mlxsw_sp_resources_span_register(mlxsw_core);
3351 if (err)
3352 goto err_resources_span_register;
3353
3354 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3355 if (err)
3356 goto err_resources_counter_register;
3357
3358 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3359 if (err)
3360 goto err_resources_counter_register;
3361
3362 return 0;
3363
3364err_resources_counter_register:
3365err_resources_span_register:
3366 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3367 return err;
3368}
3369
3370static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3371{
3372 int err;
3373
3374 err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3375 if (err)
3376 return err;
3377
3378 err = mlxsw_sp_resources_span_register(mlxsw_core);
3379 if (err)
3380 goto err_resources_span_register;
3381
3382 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3383 if (err)
3384 goto err_resources_counter_register;
3385
3386 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3387 if (err)
3388 goto err_resources_counter_register;
3389
3390 return 0;
3391
3392err_resources_counter_register:
3393err_resources_span_register:
3394 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3395 return err;
3396}
3397
/* Compute the KVD partition sizes (single, double, linear) to hand to
 * firmware. User-configured sizes are read via devlink resources when
 * available; otherwise defaults are derived from the config profile:
 * linear size from the profile, and the remaining hash space split
 * between double and single by the profile's parts ratio.
 *
 * Returns 0 on success, -EIO if the device resources are unavailable or
 * the resulting sizes violate the device minimums.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* For each partition, prefer the size configured via devlink; fall
	 * back to the profile-derived default if no resource size is set
	 * (devlink_resource_size_get() fails). The hash part is whatever is
	 * left of the KVD after the linear part, split to single and double
	 * sizes by the parts ratio from the profile, rounded down to the
	 * required granularity.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Sanity-check the result against the device minimums. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3452
3453static int
3454mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
3455 union devlink_param_value val,
3456 struct netlink_ext_ack *extack)
3457{
3458 if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
3459 (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
3460 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
3461 return -EINVAL;
3462 }
3463
3464 return 0;
3465}
3466
/* devlink parameters common to all Spectrum generations. fw_load_policy is
 * driverinit-only; get/set callbacks are NULL because the value is cached
 * by devlink itself, with validation done by the callback above.
 */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};
3473
/* Register the common devlink parameters and seed fw_load_policy with its
 * default ("driver"). Returns 0 on success or a negative errno from
 * devlink_params_register().
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}
3491
3492static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
3493{
3494 devlink_params_unregister(priv_to_devlink(mlxsw_core),
3495 mlxsw_sp_devlink_params,
3496 ARRAY_SIZE(mlxsw_sp_devlink_params));
3497}
3498
3499static int
3500mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3501 struct devlink_param_gset_ctx *ctx)
3502{
3503 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3504 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3505
3506 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3507 return 0;
3508}
3509
3510static int
3511mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3512 struct devlink_param_gset_ctx *ctx)
3513{
3514 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3515 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3516
3517 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3518}
3519
/* Extra devlink parameters for SP2+ devices, registered on top of the
 * common set: the runtime-configurable ACL region rehash interval.
 */
static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};
3529
/* Register devlink parameters for SP2+: the common set first, then the
 * SP2-specific rehash-interval parameter (seeded to 0). On failure of the
 * second registration the common set is unregistered again.
 */
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = mlxsw_sp_params_register(mlxsw_core);
	if (err)
		return err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		goto err_devlink_params_register;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;

err_devlink_params_register:
	mlxsw_sp_params_unregister(mlxsw_core);
	return err;
}
3555
3556static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3557{
3558 devlink_params_unregister(priv_to_devlink(mlxsw_core),
3559 mlxsw_sp2_devlink_params,
3560 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3561 mlxsw_sp_params_unregister(mlxsw_core);
3562}
3563
/* Core callback invoked when a Tx completion for a PTP packet arrives.
 * Strips the mlxsw Tx header prepended for the send and hands the skb to
 * the generation-specific PTP handler for timestamping.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3572
/* mlxsw core driver ops for Spectrum-1. SP1 is the only generation that
 * provides kvd_sizes_get, since only SP1 partitions KVD explicitly.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.params_register = mlxsw_sp_params_register,
	.params_unregister = mlxsw_sp_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.res_query_enabled = true,
};
3611
/* mlxsw core driver ops for Spectrum-2. Differs from SP1 in the init
 * callback, resource/params registration and the lack of kvd_sizes_get.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};
3649
/* mlxsw core driver ops for Spectrum-3. Identical to the SP2 ops except
 * for the init callback; it reuses the SP2 config profile and resources.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.flash_update = mlxsw_sp_flash_update,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.res_query_enabled = true,
};
3687
/* Return true if @dev is an mlxsw_sp front-panel port netdev, identified
 * by its netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
3692
3693static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
3694 struct netdev_nested_priv *priv)
3695{
3696 int ret = 0;
3697
3698 if (mlxsw_sp_port_dev_check(lower_dev)) {
3699 priv->data = (void *)netdev_priv(lower_dev);
3700 ret = 1;
3701 }
3702
3703 return ret;
3704}
3705
3706struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3707{
3708 struct netdev_nested_priv priv = {
3709 .data = NULL,
3710 };
3711
3712 if (mlxsw_sp_port_dev_check(dev))
3713 return netdev_priv(dev);
3714
3715 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
3716
3717 return (struct mlxsw_sp_port *)priv.data;
3718}
3719
3720struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3721{
3722 struct mlxsw_sp_port *mlxsw_sp_port;
3723
3724 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3725 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3726}
3727
3728struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3729{
3730 struct netdev_nested_priv priv = {
3731 .data = NULL,
3732 };
3733
3734 if (mlxsw_sp_port_dev_check(dev))
3735 return netdev_priv(dev);
3736
3737 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3738 &priv);
3739
3740 return (struct mlxsw_sp_port *)priv.data;
3741}
3742
/* Like mlxsw_sp_port_dev_lower_find(), but takes a reference on the found
 * port's netdev before returning, so the result stays valid outside the
 * RCU section. Release with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}
3754
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3759
/* Make the port leave any bridge the LAG device — or one of the LAG's
 * upper devices — is a member of. Called while the port is leaving the
 * LAG, so bridge state tied to the LAG must be unwound on its behalf.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	/* The LAG itself may be directly enslaved to a bridge. */
	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	/* Uppers of the LAG (e.g. VLAN devices) may be bridge ports too. */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
3778
/* Create LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3786
/* Destroy LAG @lag_id in hardware via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3794
/* Add the port to LAG @lag_id's collector at @port_index (SLCOR register).
 * The port can then receive through the LAG but does not yet transmit.
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3805
/* Remove the port from LAG @lag_id's collector (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3816
/* Enable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3827
/* Disable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3838
3839static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3840 struct net_device *lag_dev,
3841 u16 *p_lag_id)
3842{
3843 struct mlxsw_sp_upper *lag;
3844 int free_lag_id = -1;
3845 u64 max_lag;
3846 int i;
3847
3848 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3849 for (i = 0; i < max_lag; i++) {
3850 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3851 if (lag->ref_count) {
3852 if (lag->dev == lag_dev) {
3853 *p_lag_id = i;
3854 return 0;
3855 }
3856 } else if (free_lag_id < 0) {
3857 free_lag_id = i;
3858 }
3859 }
3860 if (free_lag_id < 0)
3861 return -EBUSY;
3862 *p_lag_id = free_lag_id;
3863 return 0;
3864}
3865
3866static bool
3867mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3868 struct net_device *lag_dev,
3869 struct netdev_lag_upper_info *lag_upper_info,
3870 struct netlink_ext_ack *extack)
3871{
3872 u16 lag_id;
3873
3874 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
3875 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
3876 return false;
3877 }
3878 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
3879 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
3880 return false;
3881 }
3882 return true;
3883}
3884
3885static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3886 u16 lag_id, u8 *p_port_index)
3887{
3888 u64 max_lag_members;
3889 int i;
3890
3891 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3892 MAX_LAG_MEMBERS);
3893 for (i = 0; i < max_lag_members; i++) {
3894 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3895 *p_port_index = i;
3896 return 0;
3897 }
3898 }
3899 return -EBUSY;
3900}
3901
3902static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3903 struct net_device *lag_dev)
3904{
3905 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3906 struct mlxsw_sp_upper *lag;
3907 u16 lag_id;
3908 u8 port_index;
3909 int err;
3910
3911 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3912 if (err)
3913 return err;
3914 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3915 if (!lag->ref_count) {
3916 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3917 if (err)
3918 return err;
3919 lag->dev = lag_dev;
3920 }
3921
3922 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3923 if (err)
3924 return err;
3925 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3926 if (err)
3927 goto err_col_port_add;
3928
3929 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3930 mlxsw_sp_port->local_port);
3931 mlxsw_sp_port->lag_id = lag_id;
3932 mlxsw_sp_port->lagged = 1;
3933 lag->ref_count++;
3934
3935
3936 if (mlxsw_sp_port->default_vlan->fid)
3937 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
3938
3939 return 0;
3940
3941err_col_port_add:
3942 if (!lag->ref_count)
3943 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3944 return err;
3945}
3946
/* Remove the port from its LAG: pull it out of the hardware collector,
 * flush VLAN/bridge state that was tied to the LAG, destroy the LAG when
 * the last member leaves, and restore the default PVID.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid. */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);

	/* Make the port leave bridges the LAG or its uppers are member of,
	 * on the LAG's behalf.
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	/* Last member leaving: tear the LAG down in hardware. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress again. */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
3980
/* Add the port to LAG @lag_id's distributor (SLDR register), making it
 * eligible to transmit traffic hashed to the LAG.
 */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
3991
/* Remove the port from LAG @lag_id's distributor (SLDR register). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
4002
/* Fully activate the port within its LAG: enable collection (Rx) first,
 * then add it to the distributor (Tx). On distributor failure, collection
 * is rolled back so the port ends up fully inactive.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
4023
/* Deactivate the port within its LAG: remove it from the distributor (Tx)
 * first, then disable collection (Rx) — the reverse of enable. If the
 * collection disable fails, the distributor membership is restored.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
4045
4046static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4047 struct netdev_lag_lower_state_info *info)
4048{
4049 if (info->tx_enabled)
4050 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4051 else
4052 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4053}
4054
/* Set the port's STP state for all VLANs at once via the SPMS register:
 * forwarding when @enable, discarding otherwise. The payload is heap-
 * allocated because MLXSW_REG_SPMS_LEN is too large for the stack.
 *
 * Returns 0 on success, -ENOMEM or a register-write errno on failure.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	/* One register write covers the whole VLAN range. */
	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
4079
/* Prepare the port for Open vSwitch: switch to virtual-port (VLAN-unaware)
 * mode, force STP forwarding, allow the full VLAN range tagged and disable
 * learning on every VID. On failure each step is unwound in reverse order;
 * the learning rollback walks back only the VIDs already processed.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning on the VIDs we already disabled. */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4114
/* Undo mlxsw_sp_port_ovs_join() in reverse order: re-enable learning on
 * all VIDs, remove the VLAN range, restore STP and leave virtual-port mode.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
4128
4129static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4130{
4131 unsigned int num_vxlans = 0;
4132 struct net_device *dev;
4133 struct list_head *iter;
4134
4135 netdev_for_each_lower_dev(br_dev, dev, iter) {
4136 if (netif_is_vxlan(dev))
4137 num_vxlans++;
4138 }
4139
4140 return num_vxlans > 1;
4141}
4142
4143static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4144{
4145 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4146 struct net_device *dev;
4147 struct list_head *iter;
4148
4149 netdev_for_each_lower_dev(br_dev, dev, iter) {
4150 u16 pvid;
4151 int err;
4152
4153 if (!netif_is_vxlan(dev))
4154 continue;
4155
4156 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4157 if (err || !pvid)
4158 continue;
4159
4160 if (test_and_set_bit(pvid, vlans))
4161 return false;
4162 }
4163
4164 return true;
4165}
4166
4167static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4168 struct netlink_ext_ack *extack)
4169{
4170 if (br_multicast_enabled(br_dev)) {
4171 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4172 return false;
4173 }
4174
4175 if (!br_vlan_enabled(br_dev) &&
4176 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4177 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4178 return false;
4179 }
4180
4181 if (br_vlan_enabled(br_dev) &&
4182 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4183 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4184 return false;
4185 }
4186
4187 return true;
4188}
4189
/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a front-panel port
 * (@dev). PRECHANGEUPPER vetoes unsupported topologies via extack;
 * CHANGEUPPER applies the join/leave to the device. @lower_dev is the
 * device the notification chain started from (the port itself, or a VLAN /
 * LAG device stacked on top of it). Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge, OVS and macvlan uppers are
		 * supported on top of a port.
		 */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* Joining an unoffloaded bridge that holds a VxLAN device is
		 * only allowed when the VxLAN configuration is valid.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Reject uppers that themselves already have uppers, except
		 * an already-offloaded bridge.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		/* The LAG device itself must be one the driver can offload
		 * (e.g. supported hash policy) — checked with extack.
		 */
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		/* VLANs on a LAG port must be configured on the LAG device,
		 * not on the member port.
		 */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		/* Apply the change: join on linking, leave on unlinking. */
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			/* A VLAN upper of this port was removed while it was
			 * a bridge port; make the port leave that bridge.
			 */
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
4303
4304static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4305 unsigned long event, void *ptr)
4306{
4307 struct netdev_notifier_changelowerstate_info *info;
4308 struct mlxsw_sp_port *mlxsw_sp_port;
4309 int err;
4310
4311 mlxsw_sp_port = netdev_priv(dev);
4312 info = ptr;
4313
4314 switch (event) {
4315 case NETDEV_CHANGELOWERSTATE:
4316 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4317 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4318 info->lower_state_info);
4319 if (err)
4320 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4321 }
4322 break;
4323 }
4324
4325 return 0;
4326}
4327
4328static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4329 struct net_device *port_dev,
4330 unsigned long event, void *ptr)
4331{
4332 switch (event) {
4333 case NETDEV_PRECHANGEUPPER:
4334 case NETDEV_CHANGEUPPER:
4335 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4336 event, ptr);
4337 case NETDEV_CHANGELOWERSTATE:
4338 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4339 ptr);
4340 }
4341
4342 return 0;
4343}
4344
4345static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4346 unsigned long event, void *ptr)
4347{
4348 struct net_device *dev;
4349 struct list_head *iter;
4350 int ret;
4351
4352 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4353 if (mlxsw_sp_port_dev_check(dev)) {
4354 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4355 ptr);
4356 if (ret)
4357 return ret;
4358 }
4359 }
4360
4361 return 0;
4362}
4363
/* Handle NETDEV_PRECHANGEUPPER / NETDEV_CHANGEUPPER for a VLAN device
 * (@vlan_dev, tagged with @vid) whose real device is the port @dev.
 * Only bridge and macvlan uppers are supported on such a VLAN device.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		/* Joining an unoffloaded bridge holding a VxLAN device is
		 * only allowed when the VxLAN configuration is valid.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Reject uppers that already have uppers themselves, unless
		 * the upper is an already-offloaded bridge.
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			/* PRECHANGEUPPER should have vetoed anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
4430
4431static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4432 struct net_device *lag_dev,
4433 unsigned long event,
4434 void *ptr, u16 vid)
4435{
4436 struct net_device *dev;
4437 struct list_head *iter;
4438 int ret;
4439
4440 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4441 if (mlxsw_sp_port_dev_check(dev)) {
4442 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4443 event, ptr,
4444 vid);
4445 if (ret)
4446 return ret;
4447 }
4448 }
4449
4450 return 0;
4451}
4452
4453static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
4454 struct net_device *br_dev,
4455 unsigned long event, void *ptr,
4456 u16 vid)
4457{
4458 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
4459 struct netdev_notifier_changeupper_info *info = ptr;
4460 struct netlink_ext_ack *extack;
4461 struct net_device *upper_dev;
4462
4463 if (!mlxsw_sp)
4464 return 0;
4465
4466 extack = netdev_notifier_info_to_extack(&info->info);
4467
4468 switch (event) {
4469 case NETDEV_PRECHANGEUPPER:
4470 upper_dev = info->upper_dev;
4471 if (!netif_is_macvlan(upper_dev)) {
4472 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4473 return -EOPNOTSUPP;
4474 }
4475 if (!info->linking)
4476 break;
4477 if (netif_is_macvlan(upper_dev) &&
4478 !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
4479 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4480 return -EOPNOTSUPP;
4481 }
4482 break;
4483 case NETDEV_CHANGEUPPER:
4484 upper_dev = info->upper_dev;
4485 if (info->linking)
4486 break;
4487 if (netif_is_macvlan(upper_dev))
4488 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4489 break;
4490 }
4491
4492 return 0;
4493}
4494
4495static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4496 unsigned long event, void *ptr)
4497{
4498 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4499 u16 vid = vlan_dev_vlan_id(vlan_dev);
4500
4501 if (mlxsw_sp_port_dev_check(real_dev))
4502 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4503 event, ptr, vid);
4504 else if (netif_is_lag_master(real_dev))
4505 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4506 real_dev, event,
4507 ptr, vid);
4508 else if (netif_is_bridge_master(real_dev))
4509 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
4510 event, ptr, vid);
4511
4512 return 0;
4513}
4514
4515static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4516 unsigned long event, void *ptr)
4517{
4518 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4519 struct netdev_notifier_changeupper_info *info = ptr;
4520 struct netlink_ext_ack *extack;
4521 struct net_device *upper_dev;
4522
4523 if (!mlxsw_sp)
4524 return 0;
4525
4526 extack = netdev_notifier_info_to_extack(&info->info);
4527
4528 switch (event) {
4529 case NETDEV_PRECHANGEUPPER:
4530 upper_dev = info->upper_dev;
4531 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
4532 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4533 return -EOPNOTSUPP;
4534 }
4535 if (!info->linking)
4536 break;
4537 if (netif_is_macvlan(upper_dev) &&
4538 !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
4539 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
4540 return -EOPNOTSUPP;
4541 }
4542 break;
4543 case NETDEV_CHANGEUPPER:
4544 upper_dev = info->upper_dev;
4545 if (info->linking)
4546 break;
4547 if (is_vlan_dev(upper_dev))
4548 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
4549 if (netif_is_macvlan(upper_dev))
4550 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
4551 break;
4552 }
4553
4554 return 0;
4555}
4556
4557static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
4558 unsigned long event, void *ptr)
4559{
4560 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
4561 struct netdev_notifier_changeupper_info *info = ptr;
4562 struct netlink_ext_ack *extack;
4563
4564 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
4565 return 0;
4566
4567 extack = netdev_notifier_info_to_extack(&info->info);
4568
4569
4570 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4571
4572 return -EOPNOTSUPP;
4573}
4574
4575static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4576{
4577 struct netdev_notifier_changeupper_info *info = ptr;
4578
4579 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4580 return false;
4581 return netif_is_l3_master(info->upper_dev);
4582}
4583
/* Handle netdevice events concerning a VxLAN device: join / leave the
 * hardware NVE configuration when the device is linked to / unlinked from
 * an offloaded bridge, or brought up / down underneath one.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges with no mlxsw lower device. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* The device will be joined when it is brought up
			 * (NETDEV_PRE_UP below).
			 */
			if (!netif_running(dev))
				return 0;
			/* In a VLAN-aware bridge the mapping to a VLAN is
			 * handled separately (presumably when the VLAN is
			 * configured on the VxLAN device — confirm against
			 * the switchdev code), so join only the VLAN-unaware
			 * case here, with VID 0.
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* Same split on unlinking: the VLAN-aware case is
			 * cleaned up elsewhere — TODO confirm.
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Device is being brought up while already enslaved to an
		 * offloaded bridge — join now; veto the UP on failure.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
4651
/* Top-level netdevice notifier: invalidates SPAN sessions whose destination
 * is being unregistered, re-resolves mirroring (respin), then dispatches the
 * event to the handler matching the device type.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* A device being unregistered can no longer serve as a SPAN
	 * destination; invalidate any mirroring session bound to it.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): the first test below is deliberately not part of the
	 * else-if chain, so a later match would overwrite 'err' from the
	 * VxLAN handler — presumably a VxLAN device never matches the
	 * IPinIP/router cases; confirm before relying on it.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
4695
/* Validator notifiers for IPv4/IPv6 address addition; the callbacks are
 * implemented elsewhere in the driver. Registered for the module's whole
 * lifetime in mlxsw_sp_module_init().
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
4703
/* PCI ID tables and driver structures for the three Spectrum generations;
 * one PCI driver per ASIC generation, registered in mlxsw_sp_module_init().
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};
4733
/* Module entry point: register the address validators, then the core and
 * PCI drivers for each of the three Spectrum generations. On failure,
 * everything already registered is unwound in reverse order.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	/* Core drivers must be in place before their PCI counterparts. */
	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
4782
/* Module exit: undo mlxsw_sp_module_init() in exact reverse order. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
4794
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata: PCI device tables enable autoloading; the firmware
 * files named here are requested/flashed by the driver as needed.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
4807