1
2
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/device.h>
7#include <linux/export.h>
8#include <linux/err.h>
9#include <linux/if_link.h>
10#include <linux/netdevice.h>
11#include <linux/completion.h>
12#include <linux/skbuff.h>
13#include <linux/etherdevice.h>
14#include <linux/types.h>
15#include <linux/string.h>
16#include <linux/gfp.h>
17#include <linux/random.h>
18#include <linux/jiffies.h>
19#include <linux/mutex.h>
20#include <linux/rcupdate.h>
21#include <linux/slab.h>
22#include <linux/workqueue.h>
23#include <asm/byteorder.h>
24#include <net/devlink.h>
25#include <trace/events/devlink.h>
26
27#include "core.h"
28#include "item.h"
29#include "cmd.h"
30#include "port.h"
31#include "trap.h"
32#include "emad.h"
33#include "reg.h"
34#include "resources.h"
35
/* Registry of bus-specific mlxsw drivers; protected by
 * mlxsw_core_driver_list_lock.
 */
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

/* Shared workqueues; allocation is not visible in this chunk.
 * NOTE(review): "owq" presumably stands for an ordered workqueue — confirm
 * at the allocation site.
 */
static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;
43
/* Per-port core state embedding the devlink port representation.
 * port_driver_priv doubles as an "in use" flag (see mlxsw_core_port_check()).
 */
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;	/* driver-owned per-port data; NULL if unused */
	u8 local_port;		/* device-local port number */
};
49
/* Return the driver-private data attached to a core port. */
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
55
56static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
57{
58 return mlxsw_core_port->port_driver_priv != NULL;
59}
60
61struct mlxsw_core {
62 struct mlxsw_driver *driver;
63 const struct mlxsw_bus *bus;
64 void *bus_priv;
65 const struct mlxsw_bus_info *bus_info;
66 struct workqueue_struct *emad_wq;
67 struct list_head rx_listener_list;
68 struct list_head event_listener_list;
69 struct {
70 atomic64_t tid;
71 struct list_head trans_list;
72 spinlock_t trans_list_lock;
73 bool use_emad;
74 } emad;
75 struct {
76 u8 *mapping;
77 } lag;
78 struct mlxsw_res res;
79 struct mlxsw_hwmon *hwmon;
80 struct mlxsw_thermal *thermal;
81 struct mlxsw_core_port *ports;
82 unsigned int max_ports;
83 bool reload_fail;
84 bool fw_flash_in_progress;
85 unsigned long driver_priv[0];
86
87};
88
89#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
90
91static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
92{
93
94 if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
95 mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
96 MAX_SYSTEM_PORT) + 1;
97 else
98 mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
99
100 mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
101 sizeof(struct mlxsw_core_port), GFP_KERNEL);
102 if (!mlxsw_core->ports)
103 return -ENOMEM;
104
105 return 0;
106}
107
/* Release the core port array allocated by mlxsw_ports_init(). */
static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}
112
/* Return the number of port slots (highest valid local port + 1). */
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);
118
/* Return the driver-private area carved out behind struct mlxsw_core. */
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
124
/* Whether the attached driver asked the bus to query device resources. */
bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver->res_query_enabled;
}
EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
130
/* Node of mlxsw_core->rx_listener_list: one registered RX listener
 * together with its callback argument.
 */
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;	/* passed back to the listener callback */
};
136
/* Node of mlxsw_core->event_listener_list: one registered event listener
 * together with its callback argument.
 */
struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;	/* passed back to the listener callback */
};
142
143
144
145
146
147
148
149
150
/* emad_eth_hdr_dmac
 * Destination MAC in the EMAD Ethernet header; packed from
 * MLXSW_EMAD_EH_DMAC by mlxsw_emad_construct_eth_hdr().
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC; packed from MLXSW_EMAD_EH_SMAC.
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * EtherType; packed from MLXSW_EMAD_EH_ETHERTYPE.
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol field; packed from MLXSW_EMAD_EH_MLX_PROTO.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * EMAD protocol version; packed from MLXSW_EMAD_EH_PROTO_VERSION.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * TLV type; set to MLXSW_EMAD_TLV_TYPE_OP for the operation TLV.
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Operation TLV length in u32 units; set to MLXSW_EMAD_OP_TLV_LEN.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct-route bit; cleared for the register accesses built here.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Status the device reports in its response; zeroed on requests and
 * decoded by mlxsw_emad_process_status()
 * (enum mlxsw_emad_op_tlv_status).
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * ID of the register being accessed (reg->id).
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Request/response bit: MLXSW_EMAD_OP_TLV_REQUEST on transmit,
 * compared against MLXSW_EMAD_OP_TLV_RESPONSE on receive.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * Access method: MLXSW_EMAD_OP_TLV_METHOD_QUERY or _WRITE.
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * Operation class; set to MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS.
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * 64-bit transaction ID used to match responses to pending requests.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * TLV type; set to MLXSW_EMAD_TLV_TYPE_REG for the register TLV.
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Register TLV length in u32 units: payload words plus the TLV header
 * word (see mlxsw_emad_pack_reg_tlv()).
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * TLV type; set to MLXSW_EMAD_TLV_TYPE_END for the terminating TLV.
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * End TLV length in u32 units; set to MLXSW_EMAD_END_TLV_LEN.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
265
/* Direction of a register access: read (query) or write. */
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};
270
271static inline const char *
272mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
273{
274 switch (type) {
275 case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
276 return "query";
277 case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
278 return "write";
279 }
280 BUG();
281}
282
/* Fill in the terminating end TLV of an EMAD frame. */
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}
288
/* Fill in the register TLV: header word followed by the register payload.
 * The TLV length is expressed in u32 units and includes the header word.
 */
static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	/* payload starts right after the one-word TLV header */
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
297
298static void mlxsw_emad_pack_op_tlv(char *op_tlv,
299 const struct mlxsw_reg_info *reg,
300 enum mlxsw_core_reg_access_type type,
301 u64 tid)
302{
303 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
304 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
305 mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
306 mlxsw_emad_op_tlv_status_set(op_tlv, 0);
307 mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
308 mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
309 if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
310 mlxsw_emad_op_tlv_method_set(op_tlv,
311 MLXSW_EMAD_OP_TLV_METHOD_QUERY);
312 else
313 mlxsw_emad_op_tlv_method_set(op_tlv,
314 MLXSW_EMAD_OP_TLV_METHOD_WRITE);
315 mlxsw_emad_op_tlv_class_set(op_tlv,
316 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
317 mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
318}
319
/* Prepend the EMAD Ethernet header to the skb and mark it as the MAC
 * header. Always succeeds (returns 0); headroom is pre-reserved by
 * mlxsw_emad_alloc().
 */
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
334
/* Build a complete EMAD frame in the skb by pushing the TLVs back to
 * front — end TLV, register TLV, operation TLV — and finally the
 * Ethernet header (mlxsw_emad_alloc() reserved the full length).
 */
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	/* register TLV: one header word plus the register payload */
	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
354
/* Pointer to the operation TLV inside a received EMAD skb. */
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}
359
/* Pointer to the register TLV, which follows the operation TLV. */
static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}
365
/* Register payload: past the operation TLV plus the register TLV's
 * one-word header.
 */
static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
370
371static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
372{
373 char *op_tlv;
374
375 op_tlv = mlxsw_emad_op_tlv(skb);
376 return mlxsw_emad_op_tlv_tid_get(op_tlv);
377}
378
379static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
380{
381 char *op_tlv;
382
383 op_tlv = mlxsw_emad_op_tlv(skb);
384 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
385}
386
/* Map the device-reported EMAD status to an errno: 0 on success,
 * -EAGAIN for transient conditions worth retrying, -EIO for hard
 * failures (including unknown statuses). The raw status is returned
 * through *p_status for logging.
 */
static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}
410
/* Convenience wrapper: decode the status of a received EMAD skb. */
static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
417
/* One in-flight EMAD register transaction. Lives on
 * mlxsw_core->emad.trans_list (RCU-traversed by the RX listener) and
 * optionally on a caller-supplied bulk list.
 */
struct mlxsw_reg_trans {
	struct list_head list;		/* core's emad.trans_list linkage */
	struct list_head bulk_list;	/* caller's bulk-wait linkage */
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;		/* original frame; copied per send */
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;	/* retry timer */
	unsigned int retries;
	u64 tid;			/* matches responses to this trans */
	struct completion completion;	/* signalled when trans finishes */
	atomic_t active;		/* 1 while a send awaits resolution;
					 * decremented exactly once by either
					 * the response path or the timeout
					 */
	mlxsw_reg_trans_cb_t *cb;	/* optional response-payload callback */
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;			/* final status of the transaction */
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};
437
438#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
439#define MLXSW_EMAD_TIMEOUT_MS 200
440
441static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
442{
443 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
444
445 if (trans->core->fw_flash_in_progress)
446 timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
447
448 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
449}
450
/* Send (or resend) a transaction: transmit a copy of the original skb so
 * the original survives for retries, mark the transaction active and arm
 * its timeout. Returns 0 on success or a negative errno.
 */
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* trace the EMAD past the bus-specific TX header */
	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}
474
/* Complete a transaction: free the kept original skb, unlink it from the
 * in-flight list (RCU removal under the list lock), record the final
 * error and wake the waiter.
 */
static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}
486
/* Retry a transaction up to MLXSW_EMAD_MAX_RETRY times; once retries are
 * exhausted (or the resend itself fails) the transaction is finished
 * with an error.
 */
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
502
/* Timeout handler: if the response path has not already claimed the
 * transaction (active drops to 0 exactly once), retry it.
 */
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;	/* response already being processed */

	mlxsw_emad_transmit_retry(trans->core, trans);
}
514
/* Handle a response matched to a pending transaction. Claims the
 * transaction from the timeout (active drops to 0 exactly once), then
 * retries on transient status, or delivers the payload to the caller's
 * callback and finishes the transaction.
 */
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;	/* timeout path already claimed it */

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
539
540
/* RX trap handler for EMAD frames: match a response to its pending
 * transaction by TID. Traversal is list_for_each_entry_rcu — the caller
 * is expected to hold the RCU read lock; writers hold trans_list_lock.
 * Always consumes the skb.
 */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}
563
/* Trap EMAD responses to the CPU and feed them to the handler above. */
static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
567
568static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
569{
570 struct workqueue_struct *emad_wq;
571 u64 tid;
572 int err;
573
574 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
575 return 0;
576
577 emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
578 if (!emad_wq)
579 return -ENOMEM;
580 mlxsw_core->emad_wq = emad_wq;
581
582
583
584
585
586 get_random_bytes(&tid, 4);
587 tid <<= 32;
588 atomic64_set(&mlxsw_core->emad.tid, tid);
589
590 INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
591 spin_lock_init(&mlxsw_core->emad.trans_list_lock);
592
593 err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
594 mlxsw_core);
595 if (err)
596 return err;
597
598 err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
599 if (err)
600 goto err_emad_trap_set;
601 mlxsw_core->emad.use_emad = true;
602
603 return 0;
604
605err_emad_trap_set:
606 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
607 mlxsw_core);
608 destroy_workqueue(mlxsw_core->emad_wq);
609 return err;
610}
611
/* Tear down EMAD handling: disable EMAD use, unregister the RX trap and
 * destroy the timeout workqueue. Mirror of mlxsw_emad_init().
 */
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
623
/* Allocate a zeroed skb big enough for a full EMAD frame carrying a
 * register payload of reg_len bytes, with all headroom reserved so the
 * frame can be built with skb_push(). Returns NULL on failure or if the
 * frame would exceed MLXSW_EMAD_MAX_FRAME_LEN.
 */
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	/* payload + reg TLV header + Ethernet header + op/end TLVs +
	 * bus-specific TX header
	 */
	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
644
/* Start an asynchronous EMAD register access: build the frame, set up
 * the transaction (linked on the caller's bulk_list and the core's
 * in-flight list) and transmit it. Completion is signalled via
 * trans->completion; cb, if given, receives the response payload.
 * On failure everything is unlinked and the skb freed.
 */
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	/* publish before transmit so the RX listener can match a response */
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
697
698
699
700
701
/* Add a bus-specific driver to the global registry. Always succeeds. */
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);
710
/* Remove a driver from the global registry. */
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
718
719static struct mlxsw_driver *__driver_find(const char *kind)
720{
721 struct mlxsw_driver *mlxsw_driver;
722
723 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
724 if (strcmp(mlxsw_driver->kind, kind) == 0)
725 return mlxsw_driver;
726 }
727 return NULL;
728}
729
730static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
731{
732 struct mlxsw_driver *mlxsw_driver;
733
734 spin_lock(&mlxsw_core_driver_list_lock);
735 mlxsw_driver = __driver_find(kind);
736 spin_unlock(&mlxsw_core_driver_list_lock);
737 return mlxsw_driver;
738}
739
/* devlink .port_split op: validate the port index, then forward to the
 * driver if it implements splitting.
 */
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
					      extack);
}
756
/* devlink .port_unsplit op: validate the port index, then forward to the
 * driver if it implements unsplitting.
 */
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
						extack);
}
772
/* devlink .sb_pool_get op: forward to the driver if implemented. */
static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}
786
/* devlink .sb_pool_set op: forward to the driver if implemented. */
static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type,
					 extack);
}
802
/* Map a devlink_port back to its containing mlxsw_core_port. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
807
/* devlink .port_type_set op: forward the requested type change for this
 * local port to the driver if implemented.
 */
static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}
822
/* devlink .sb_port_pool_get op: forward to the driver if implemented and
 * the port is in use.
 */
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}
837
/* devlink .sb_port_pool_set op: forward to the driver if implemented and
 * the port is in use.
 */
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold, extack);
}
853
/* devlink .sb_tc_pool_bind_get op: forward to the driver if implemented
 * and the port is in use.
 */
static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}
871
/* devlink .sb_tc_pool_bind_set op: forward to the driver if implemented
 * and the port is in use.
 */
static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold, extack);
}
890
/* devlink .sb_occ_snapshot op: forward to the driver if implemented. */
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}
901
/* devlink .sb_occ_max_clear op: forward to the driver if implemented. */
static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}
912
/* devlink .sb_occ_port_pool_get op: forward to the driver if implemented
 * and the port is in use.
 */
static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}
928
/* devlink .sb_occ_tc_port_bind_get op: forward to the driver if
 * implemented and the port is in use.
 */
static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}
946
/* devlink .info_get op: report driver name plus hardware revision,
 * firmware PSID and running firmware version, all read from the device
 * via the MGIR register.
 */
static int
mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
	u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
	char mgir_pl[MLXSW_REG_MGIR_LEN];
	char buf[32];
	int err;

	err = devlink_info_driver_name_put(req,
					   mlxsw_core->bus_info->device_kind);
	if (err)
		return err;

	mlxsw_reg_mgir_pack(mgir_pl);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
	if (err)
		return err;
	mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
			      &fw_minor, &fw_sub_minor);

	sprintf(buf, "%X", hw_rev);
	err = devlink_info_version_fixed_put(req, "hw.revision", buf);
	if (err)
		return err;

	err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
	if (err)
		return err;

	sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
	err = devlink_info_version_running_put(req, "fw.version", buf);
	if (err)
		return err;

	return 0;
}
986
/* devlink .reload op: tear the device down and register it again on the
 * same bus (requires bus reset support). reload_fail is recorded so a
 * later unregister knows the device is only partially initialized.
 */
static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink);
	mlxsw_core->reload_fail = !!err;

	return err;
}
1005
/* devlink .flash_update op: forward to the driver if implemented. */
static int mlxsw_devlink_flash_update(struct devlink *devlink,
				      const char *file_name,
				      const char *component,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->flash_update)
		return -EOPNOTSUPP;
	return mlxsw_driver->flash_update(mlxsw_core, file_name,
					  component, extack);
}
1019
/* devlink operations table; each op dispatches to the attached driver. */
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload			= mlxsw_devlink_core_bus_device_reload,
	.port_type_set		= mlxsw_devlink_port_type_set,
	.port_split		= mlxsw_devlink_port_split,
	.port_unsplit		= mlxsw_devlink_port_unsplit,
	.sb_pool_get		= mlxsw_devlink_sb_pool_get,
	.sb_pool_set		= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get	= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set	= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get	= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set	= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot	= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear	= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get	= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
	.info_get		= mlxsw_devlink_info_get,
	.flash_update		= mlxsw_devlink_flash_update,
};
1038
/* Bring up one mlxsw device: allocate the devlink/core object (unless
 * this is a reload, in which case the existing devlink is reused), init
 * the bus, query resources, set up ports, LAG mapping, EMAD, devlink
 * registration, driver init, hwmon and thermal. Teardown on error is
 * the exact reverse, with reload-only steps skipped under "reload".
 */
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				 const struct mlxsw_bus *mlxsw_bus,
				 void *bus_priv, bool reload,
				 struct devlink *devlink)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		/* driver_priv lives right behind struct mlxsw_core */
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	/* only pass a resource struct if the driver wants them queried */
	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	if (mlxsw_driver->params_register && !reload) {
		err = mlxsw_driver->params_register(mlxsw_core);
		if (err)
			goto err_register_params;
	}

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->params_register && !reload)
		devlink_params_publish(devlink);

	return 0;

err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
	if (mlxsw_driver->params_unregister && !reload)
		mlxsw_driver->params_unregister(mlxsw_core);
err_register_params:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
1164
/* Register a core instance on top of the given bus.
 * @reload: true when called from a devlink reload, in which case the
 * existing devlink instance is reused instead of allocating a new one.
 */
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink)
{
	bool called_again = false;
	int err;

again:
	err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
					       bus_priv, reload, devlink);
	/* -EAGAIN asks for the whole registration to be restarted from
	 * scratch (presumably after a firmware update during init -- TODO
	 * confirm against __mlxsw_core_bus_device_register()). Retry
	 * exactly once so a persistent -EAGAIN cannot loop forever.
	 */
	if (err == -EAGAIN && !called_again) {
		called_again = true;
		goto again;
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
1188
/* Tear down a core instance set up by mlxsw_core_bus_device_register().
 * The calls below undo the registration steps in reverse order; the
 * ordering is significant -- do not rearrange.
 * @reload: true when called as part of a devlink reload, in which case
 * the devlink instance, its resources and its params registration are
 * kept alive for the re-registration that follows.
 */
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->reload_fail) {
		if (!reload)
			/* After a failed reload only the devlink-level state
			 * is still consistent; skip the driver/bus teardown
			 * and release just the devlink side.
			 */
			goto reload_fail_deinit;
		else
			/* A new reload attempt; keep everything in place. */
			return;
	}

	if (mlxsw_core->driver->params_unregister && !reload)
		devlink_params_unpublish(devlink);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	if (mlxsw_core->driver->params_unregister && !reload)
		mlxsw_core->driver->params_unregister(mlxsw_core);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);

	return;

reload_fail_deinit:
	if (mlxsw_core->driver->params_unregister)
		mlxsw_core->driver->params_unregister(mlxsw_core);
	devlink_unregister(devlink);
	devlink_resources_unregister(devlink, NULL);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
1231
1232bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
1233 const struct mlxsw_tx_info *tx_info)
1234{
1235 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
1236 tx_info);
1237}
1238EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
1239
1240int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1241 const struct mlxsw_tx_info *tx_info)
1242{
1243 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
1244 tx_info);
1245}
1246EXPORT_SYMBOL(mlxsw_core_skb_transmit);
1247
1248void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
1249 struct sk_buff *skb, u8 local_port)
1250{
1251 if (mlxsw_core->driver->ptp_transmitted)
1252 mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
1253 local_port);
1254}
1255EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
1256
1257static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
1258 const struct mlxsw_rx_listener *rxl_b)
1259{
1260 return (rxl_a->func == rxl_b->func &&
1261 rxl_a->local_port == rxl_b->local_port &&
1262 rxl_a->trap_id == rxl_b->trap_id);
1263}
1264
1265static struct mlxsw_rx_listener_item *
1266__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
1267 const struct mlxsw_rx_listener *rxl,
1268 void *priv)
1269{
1270 struct mlxsw_rx_listener_item *rxl_item;
1271
1272 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
1273 if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
1274 rxl_item->priv == priv)
1275 return rxl_item;
1276 }
1277 return NULL;
1278}
1279
1280int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
1281 const struct mlxsw_rx_listener *rxl,
1282 void *priv)
1283{
1284 struct mlxsw_rx_listener_item *rxl_item;
1285
1286 rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1287 if (rxl_item)
1288 return -EEXIST;
1289 rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
1290 if (!rxl_item)
1291 return -ENOMEM;
1292 rxl_item->rxl = *rxl;
1293 rxl_item->priv = priv;
1294
1295 list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
1296 return 0;
1297}
1298EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
1299
1300void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
1301 const struct mlxsw_rx_listener *rxl,
1302 void *priv)
1303{
1304 struct mlxsw_rx_listener_item *rxl_item;
1305
1306 rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1307 if (!rxl_item)
1308 return;
1309 list_del_rcu(&rxl_item->list);
1310 synchronize_rcu();
1311 kfree(rxl_item);
1312}
1313EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
1314
1315static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
1316 void *priv)
1317{
1318 struct mlxsw_event_listener_item *event_listener_item = priv;
1319 struct mlxsw_reg_info reg;
1320 char *payload;
1321 char *op_tlv = mlxsw_emad_op_tlv(skb);
1322 char *reg_tlv = mlxsw_emad_reg_tlv(skb);
1323
1324 reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
1325 reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
1326 payload = mlxsw_emad_reg_payload(op_tlv);
1327 event_listener_item->el.func(®, payload, event_listener_item->priv);
1328 dev_kfree_skb(skb);
1329}
1330
1331static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
1332 const struct mlxsw_event_listener *el_b)
1333{
1334 return (el_a->func == el_b->func &&
1335 el_a->trap_id == el_b->trap_id);
1336}
1337
1338static struct mlxsw_event_listener_item *
1339__find_event_listener_item(struct mlxsw_core *mlxsw_core,
1340 const struct mlxsw_event_listener *el,
1341 void *priv)
1342{
1343 struct mlxsw_event_listener_item *el_item;
1344
1345 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
1346 if (__is_event_listener_equal(&el_item->el, el) &&
1347 el_item->priv == priv)
1348 return el_item;
1349 }
1350 return NULL;
1351}
1352
/* Register an event listener. Events arrive as trapped packets, so this
 * installs an RX listener on the event's trap ID whose callback
 * (mlxsw_core_event_listener_func) decodes the EMAD and calls @el->func.
 */
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* Publish the item only after the backing RX listener was
	 * successfully registered; without it no events can be delivered.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1390
1391void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
1392 const struct mlxsw_event_listener *el,
1393 void *priv)
1394{
1395 struct mlxsw_event_listener_item *el_item;
1396 const struct mlxsw_rx_listener rxl = {
1397 .func = mlxsw_core_event_listener_func,
1398 .local_port = MLXSW_PORT_DONT_CARE,
1399 .trap_id = el->trap_id,
1400 };
1401
1402 el_item = __find_event_listener_item(mlxsw_core, el, priv);
1403 if (!el_item)
1404 return;
1405 mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
1406 list_del(&el_item->list);
1407 kfree(el_item);
1408}
1409EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1410
1411static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
1412 const struct mlxsw_listener *listener,
1413 void *priv)
1414{
1415 if (listener->is_event)
1416 return mlxsw_core_event_listener_register(mlxsw_core,
1417 &listener->u.event_listener,
1418 priv);
1419 else
1420 return mlxsw_core_rx_listener_register(mlxsw_core,
1421 &listener->u.rx_listener,
1422 priv);
1423}
1424
1425static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
1426 const struct mlxsw_listener *listener,
1427 void *priv)
1428{
1429 if (listener->is_event)
1430 mlxsw_core_event_listener_unregister(mlxsw_core,
1431 &listener->u.event_listener,
1432 priv);
1433 else
1434 mlxsw_core_rx_listener_unregister(mlxsw_core,
1435 &listener->u.rx_listener,
1436 priv);
1437}
1438
1439int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
1440 const struct mlxsw_listener *listener, void *priv)
1441{
1442 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1443 int err;
1444
1445 err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
1446 if (err)
1447 return err;
1448
1449 mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
1450 listener->trap_group, listener->is_ctrl);
1451 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1452 if (err)
1453 goto err_trap_set;
1454
1455 return 0;
1456
1457err_trap_set:
1458 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1459 return err;
1460}
1461EXPORT_SYMBOL(mlxsw_core_trap_register);
1462
/* Undo mlxsw_core_trap_register(): reprogram the trap with its
 * unregister action and drop the listener.
 */
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	/* For event listeners the device trap configuration is left
	 * untouched -- presumably events must keep firing; confirm before
	 * changing this.
	 */
	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
1479
1480static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
1481{
1482 return atomic64_inc_return(&mlxsw_core->emad.tid);
1483}
1484
1485static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1486 const struct mlxsw_reg_info *reg,
1487 char *payload,
1488 enum mlxsw_core_reg_access_type type,
1489 struct list_head *bulk_list,
1490 mlxsw_reg_trans_cb_t *cb,
1491 unsigned long cb_priv)
1492{
1493 u64 tid = mlxsw_core_tid_get(mlxsw_core);
1494 struct mlxsw_reg_trans *trans;
1495 int err;
1496
1497 trans = kzalloc(sizeof(*trans), GFP_KERNEL);
1498 if (!trans)
1499 return -ENOMEM;
1500
1501 err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
1502 bulk_list, cb, cb_priv, tid);
1503 if (err) {
1504 kfree(trans);
1505 return err;
1506 }
1507 return 0;
1508}
1509
/* Queue an asynchronous register query; completion is collected via
 * mlxsw_reg_trans_bulk_wait() on @bulk_list.
 */
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);
1520
/* Queue an asynchronous register write; completion is collected via
 * mlxsw_reg_trans_bulk_wait() on @bulk_list.
 */
int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
1531
/* Wait for a single EMAD transaction to complete and reap it.
 * Returns the transaction's status; @trans is freed and must not be
 * touched by the caller afterwards.
 */
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	/* The transaction is done; make sure its timeout work is not still
	 * running before the structure is freed.
	 */
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));
		trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
				    trans->emad_status,
				    mlxsw_emad_op_tlv_status_str(trans->emad_status));
	}

	list_del(&trans->bulk_list);
	/* Defer the free past a grace period -- some lookup path appears to
	 * reference transactions under RCU; TODO confirm which one.
	 */
	kfree_rcu(trans, rcu);
	return err;
}
1560
1561int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
1562{
1563 struct mlxsw_reg_trans *trans;
1564 struct mlxsw_reg_trans *tmp;
1565 int sum_err = 0;
1566 int err;
1567
1568 list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
1569 err = mlxsw_reg_trans_wait(trans);
1570 if (err && sum_err == 0)
1571 sum_err = err;
1572 }
1573 return sum_err;
1574}
1575EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
1576
/* Access a register through the command interface (mailboxes) instead of
 * EMAD; used while the EMAD transport is not operational -- see
 * mlxsw_core_reg_access().
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	/* The input mailbox carries an EMAD-formatted request: operation
	 * TLV followed by the register TLV holding the payload.
	 */
	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* MRSR triggers a software reset; a reset-in-progress response to
	 * it is expected and must not be treated as a failure (see the
	 * reset_ok handling in mlxsw_cmd_exec()).
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			/* -EAGAIN from the status check is retried a
			 * bounded number of times before giving up.
			 */
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
1639
1640static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
1641 char *payload, size_t payload_len,
1642 unsigned long cb_priv)
1643{
1644 char *orig_payload = (char *) cb_priv;
1645
1646 memcpy(orig_payload, payload, payload_len);
1647}
1648
/* Synchronous register access entry point used by mlxsw_reg_query() and
 * mlxsw_reg_write().
 */
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* While use_emad is not set (e.g. during initialization, before
	 * the EMAD transport is brought up) fall back to the command
	 * interface.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	/* Queue a single EMAD transaction and wait for it; the callback
	 * copies the response back into @payload.
	 */
	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
1673
/* Synchronously read register @reg into @payload. */
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);
1681
/* Synchronously write @payload to register @reg. */
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
1689
/* Deliver a received packet to the first matching registered RX listener.
 * The skb is freed here when no listener matches or the metadata is out
 * of range; otherwise ownership passes to the listener callback.
 */
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->trap_id);
		/* Listeners match on local ports, not LAGs; resolve the
		 * (lag_id, port_index) pair to the member's local port.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	/* Bounds-check before using trap_id/local_port as lookup keys. */
	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
1740
1741static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
1742 u16 lag_id, u8 port_index)
1743{
1744 return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
1745 port_index;
1746}
1747
1748void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
1749 u16 lag_id, u8 port_index, u8 local_port)
1750{
1751 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1752 lag_id, port_index);
1753
1754 mlxsw_core->lag.mapping[index] = local_port;
1755}
1756EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
1757
1758u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
1759 u16 lag_id, u8 port_index)
1760{
1761 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1762 lag_id, port_index);
1763
1764 return mlxsw_core->lag.mapping[index];
1765}
1766EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
1767
1768void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
1769 u16 lag_id, u8 local_port)
1770{
1771 int i;
1772
1773 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
1774 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1775 lag_id, i);
1776
1777 if (mlxsw_core->lag.mapping[index] == local_port)
1778 mlxsw_core->lag.mapping[index] = 0;
1779 }
1780}
1781EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
1782
1783bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
1784 enum mlxsw_res_id res_id)
1785{
1786 return mlxsw_res_valid(&mlxsw_core->res, res_id);
1787}
1788EXPORT_SYMBOL(mlxsw_core_res_valid);
1789
1790u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
1791 enum mlxsw_res_id res_id)
1792{
1793 return mlxsw_res_get(&mlxsw_core->res, res_id);
1794}
1795EXPORT_SYMBOL(mlxsw_core_res_get);
1796
/* Register @local_port with devlink as a physical port.
 * On failure the port slot is scrubbed so it reads as unused again
 * (see mlxsw_core_port_check()).
 * Returns 0 or the devlink_port_register() error.
 */
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
			 u32 port_number, bool split,
			 u32 split_port_subnumber,
			 const unsigned char *switch_id,
			 unsigned char switch_id_len)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_number, split, split_port_subnumber,
			       switch_id, switch_id_len);
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);
1819
1820void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
1821{
1822 struct mlxsw_core_port *mlxsw_core_port =
1823 &mlxsw_core->ports[local_port];
1824 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1825
1826 devlink_port_unregister(devlink_port);
1827 memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
1828}
1829EXPORT_SYMBOL(mlxsw_core_port_fini);
1830
1831void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1832 void *port_driver_priv, struct net_device *dev)
1833{
1834 struct mlxsw_core_port *mlxsw_core_port =
1835 &mlxsw_core->ports[local_port];
1836 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1837
1838 mlxsw_core_port->port_driver_priv = port_driver_priv;
1839 devlink_port_type_eth_set(devlink_port, dev);
1840}
1841EXPORT_SYMBOL(mlxsw_core_port_eth_set);
1842
1843void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1844 void *port_driver_priv)
1845{
1846 struct mlxsw_core_port *mlxsw_core_port =
1847 &mlxsw_core->ports[local_port];
1848 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1849
1850 mlxsw_core_port->port_driver_priv = port_driver_priv;
1851 devlink_port_type_ib_set(devlink_port, NULL);
1852}
1853EXPORT_SYMBOL(mlxsw_core_port_ib_set);
1854
1855void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
1856 void *port_driver_priv)
1857{
1858 struct mlxsw_core_port *mlxsw_core_port =
1859 &mlxsw_core->ports[local_port];
1860 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1861
1862 mlxsw_core_port->port_driver_priv = port_driver_priv;
1863 devlink_port_type_clear(devlink_port);
1864}
1865EXPORT_SYMBOL(mlxsw_core_port_clear);
1866
1867enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
1868 u8 local_port)
1869{
1870 struct mlxsw_core_port *mlxsw_core_port =
1871 &mlxsw_core->ports[local_port];
1872 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1873
1874 return devlink_port->type;
1875}
1876EXPORT_SYMBOL(mlxsw_core_port_type_get);
1877
1878
1879struct devlink_port *
1880mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
1881 u8 local_port)
1882{
1883 struct mlxsw_core_port *mlxsw_core_port =
1884 &mlxsw_core->ports[local_port];
1885 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1886
1887 return devlink_port;
1888}
1889EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
1890
/* Dump a buffer to the debug log, four big-endian dwords per line,
 * trimming trailing all-zero dwords (at least one line is always
 * printed).
 */
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	/* Find the last non-zero dword so trailing zeros are not printed. */
	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	/* NOTE(review): when the trimmed count is not a multiple of four,
	 * the last line reads up to three dwords past it; this assumes the
	 * underlying buffer is big enough -- TODO confirm callers only pass
	 * mailbox-sized buffers.
	 */
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
1908
/* Execute a device command through the underlying bus.
 * @out_mbox_direct: semantics are bus specific -- presumably the output
 * is returned inline rather than via an output mailbox; confirm against
 * the bus implementation.
 * @reset_ok: a RUNNING_RESET status is expected and not an error (used
 * when the command itself triggers a device reset).
 * Returns 0 on success or a negative errno from the bus layer.
 */
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	/* Mailboxes are handled as dwords; sizes must be dword aligned. */
	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	/* -EIO with a RUNNING_RESET status is the expected outcome of a
	 * reset-triggering command; report success in that case.
	 */
	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
1954
/* Queue delayed work on the driver-wide mlxsw workqueue. */
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);
1960
/* Queue work on the ordered mlxsw workqueue (works run one at a time,
 * in queueing order).
 */
bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);
1966
/* Wait for all works queued via mlxsw_core_schedule_work() to finish. */
void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
1972
1973int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
1974 const struct mlxsw_config_profile *profile,
1975 u64 *p_single_size, u64 *p_double_size,
1976 u64 *p_linear_size)
1977{
1978 struct mlxsw_driver *driver = mlxsw_core->driver;
1979
1980 if (!driver->kvd_sizes_get)
1981 return -EINVAL;
1982
1983 return driver->kvd_sizes_get(mlxsw_core, profile,
1984 p_single_size, p_double_size,
1985 p_linear_size);
1986}
1987EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
1988
/* Flag that a firmware flash has started; consumers of
 * fw_flash_in_progress adjust behavior accordingly -- TODO confirm who
 * reads the flag.
 */
void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->fw_flash_in_progress = true;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
1994
/* Flag that the firmware flash started by mlxsw_core_fw_flash_start()
 * has finished.
 */
void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->fw_flash_in_progress = false;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
2000
/* Query device resources via the QUERY_RESOURCES command and parse them
 * into @res. The firmware returns resources in pages; iterate until the
 * end-of-table marker is seen.
 * Returns 0 on success (or when @res is NULL), a command error, or -EIO
 * when the marker never shows up within the maximum number of queries.
 */
int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
			       struct mlxsw_res *res)
{
	int index, i;
	u64 data;
	u16 id;
	int err;

	if (!res)
		return 0;

	mlxsw_cmd_mbox_zero(mbox);

	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
	     index++) {
		err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index);
		if (err)
			return err;

		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
			data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				return 0;

			mlxsw_res_parse(res, id, data);
		}
	}

	/* The end-of-table marker was never seen; treat the dump as
	 * corrupted rather than using a partial resource set.
	 */
	return -EIO;
}
EXPORT_SYMBOL(mlxsw_core_resources_query);
2037
/* Read the upper 32 bits of the device FRC counter via the bus. */
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_frc_h);
2043
/* Read the lower 32 bits of the device FRC counter via the bus. */
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
2049
2050static int __init mlxsw_core_module_init(void)
2051{
2052 int err;
2053
2054 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
2055 if (!mlxsw_wq)
2056 return -ENOMEM;
2057 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
2058 mlxsw_core_driver_name);
2059 if (!mlxsw_owq) {
2060 err = -ENOMEM;
2061 goto err_alloc_ordered_workqueue;
2062 }
2063 return 0;
2064
2065err_alloc_ordered_workqueue:
2066 destroy_workqueue(mlxsw_wq);
2067 return err;
2068}
2069
/* Module exit: destroy both workqueues created in module init. */
static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}
2075
/* Module entry points and metadata. */
module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");
2082