// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX2 CGX/MAC driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"

#define DRV_NAME "octeontx2-cgx"
#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"

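/**
 * struct lmac - per-LMAC state
 * @wq_cmd_cmplt:	waitqueue the command issuer sleeps on until the
 *			firmware response arrives
 * @cmd_lock:		serializes use of the firmware command interface
 * @resp:		last firmware response, copied from the event register
 * @link_info:		cached link status in user format
 * @event_cb:		link change notification callback and its cookie
 * @event_cb_lock:	keeps the callback from being unregistered while in use
 * @cmd_pend:		set before a command is issued, cleared by the IRQ
 *			handler once the response is received
 * @cgx:		parent CGX port
 * @lmac_id:		LMAC index within the CGX
 * @name:		name used when requesting the firmware IRQ
 */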
struct lmac {
        wait_queue_head_t wq_cmd_cmplt;
        struct mutex cmd_lock;
        u64 resp;
        struct cgx_link_user_info link_info;
        struct cgx_event_cb event_cb;
        spinlock_t event_cb_lock;
        bool cmd_pend;
        struct cgx *cgx;
        u8 lmac_id;
        char *name;
};

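/**
 * struct cgx - per-CGX device state
 * @reg_base:		mapped CSR base of this CGX block
 * @pdev:		underlying PCI device
 * @cgx_id:		CGX block id, derived from the BAR address
 * @lmac_count:		number of LMACs enabled on this CGX
 * @lmac_idmap:		LMAC id to per-LMAC state mapping
 * @cgx_cmd_work:	deferred work that brings up all LMAC links
 * @cgx_cmd_workq:	workqueue running @cgx_cmd_work
 * @cgx_list:		node in the global list of probed CGX devices
 */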
struct cgx {
        void __iomem *reg_base;
        struct pci_dev *pdev;
        u8 cgx_id;
        u8 lmac_count;
        struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
        struct work_struct cgx_cmd_work;
        struct workqueue_struct *cgx_cmd_workq;
        struct list_head cgx_list;
};

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];

/* Convert firmware LMAC type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
        writeq(val, cgx->reg_base + (lmac << 18) + offset);
}

static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
        return readq(cgx->reg_base + (lmac << 18) + offset);
}

static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
        if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
                return NULL;

        return cgx->lmac_idmap[lmac_id];
}

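/* Return the number of CGX devices to provision for: one more than the
 * highest CGX id currently on the global list, or 0 if none are probed.
 */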
int cgx_get_cgxcnt_max(void)
{
        struct cgx *cgx_dev;
        int idmax = -ENODEV;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
                if (cgx_dev->cgx_id > idmax)
                        idmax = cgx_dev->cgx_id;

        if (idmax < 0)
                return 0;

        return idmax + 1;
}
EXPORT_SYMBOL(cgx_get_cgxcnt_max);

int cgx_get_lmac_cnt(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        return cgx->lmac_count;
}
EXPORT_SYMBOL(cgx_get_lmac_cnt);

void *cgx_get_pdata(int cgx_id)
{
        struct cgx *cgx_dev;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
                if (cgx_dev->cgx_id == cgx_id)
                        return cgx_dev;
        }
        return NULL;
}
EXPORT_SYMBOL(cgx_get_pdata);

int cgx_get_cgxid(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -EINVAL;

        return cgx->cgx_id;
}

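/* Copy the cached link status for @lmac_id into @linfo. The cache is
 * refreshed from the firmware link change interrupt, so callers that need a
 * view consistent with in-flight events must serialize with their own event
 * handling.
 */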
int cgx_get_link_info(void *cgxd, int lmac_id,
                      struct cgx_link_user_info *linfo)
{
        struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

        if (!lmac)
                return -ENODEV;

        *linfo = lmac->link_info;
        return 0;
}
EXPORT_SYMBOL(cgx_get_link_info);

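/* Pack a 6-byte MAC address into the low 48 bits of a u64, most significant
 * byte first, matching the DMAC CAM register layout.
 */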
static u64 mac2u64(u8 *mac_addr)
{
        u64 mac = 0;
        int index;

        for (index = ETH_ALEN - 1; index >= 0; index--)
                mac |= ((u64)*mac_addr++) << (8 * index);
        return mac;
}

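/* Program @mac_addr into the DMAC CAM entry reserved for @lmac_id and turn on
 * CAM based DMAC filtering for that LMAC.
 */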
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        /* Pack the 6 byte MAC address into CAM entry format */
        cfg = mac2u64(mac_addr);

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
                  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}
EXPORT_SYMBOL(cgx_lmac_addr_set);

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
        return cfg & CGX_RX_DMAC_ADR_MASK;
}
EXPORT_SYMBOL(cgx_lmac_addr_get);

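/* Set the packet kind (pkind) identifier assigned to traffic received on this
 * LMAC; only the low 6 bits of @pkind are programmed.
 */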
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
        return 0;
}
EXPORT_SYMBOL(cgx_set_pkind);

static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
        u64 cfg;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}
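/* Configure an LMAC for internal loopback: SGMII/QSGMII LMACs loop back in
 * the GMP PCS block, all other LMAC types loop back in the SPU.
 */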
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u8 lmac_type;
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        lmac_type = cgx_get_lmac_type(cgx, lmac_id);
        if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
                if (enable)
                        cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
                else
                        cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
                cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
                if (enable)
                        cfg |= CGXX_SPUX_CONTROL1_LBK;
                else
                        cfg &= ~CGXX_SPUX_CONTROL1_LBK;
                cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
        }
        return 0;
}
EXPORT_SYMBOL(cgx_lmac_internal_loopback);

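/* Enable or disable promiscuous reception on an LMAC by toggling DMAC CAM
 * filtering and the accept/multicast/broadcast controls.
 */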
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
        struct cgx *cgx = cgx_get_pdata(cgx_id);
        u64 cfg = 0;

        if (!cgx)
                return;

        if (enable) {
                /* Enable promiscuous mode on LMAC */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
                cfg |= CGX_DMAC_BCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

                cfg = cgx_read(cgx, 0,
                               (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
                cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
                cgx_write(cgx, 0,
                          (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
        } else {
                /* Disable promiscuous mode */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
                cfg = cgx_read(cgx, 0,
                               (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
                cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
                cgx_write(cgx, 0,
                          (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
        }
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);

/* Enable or disable forwarding of received pause frames to the Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx)
                return;

        if (enable) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        }
}
EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;
        *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
        return 0;
}
EXPORT_SYMBOL(cgx_get_rx_stats);

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;
        *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
        return 0;
}
EXPORT_SYMBOL(cgx_get_tx_stats);

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
                cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
                cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);

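/* Enable or disable packet transmission on an LMAC. Returns the previous Tx
 * enable state (non-zero if Tx was already enabled) so callers can restore it
 * later.
 */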
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg, last;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        last = cfg;
        if (enable)
                cfg |= DATA_PKT_TX_EN;
        else
                cfg &= ~DATA_PKT_TX_EN;

        if (cfg != last)
                cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return !!(last & DATA_PKT_TX_EN);
}
EXPORT_SYMBOL(cgx_lmac_tx_enable);

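/* CGX firmware command interface, low level support: send one command word
 * through the per-LMAC command register and wait for the response, which the
 * firmware event interrupt handler copies into lmac->resp. Returns 0 on
 * success, -EBUSY if the command register is still owned by firmware, -EIO on
 * timeout, or the error from mutex_lock_interruptible() if interrupted while
 * waiting for the command lock.
 */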
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
        struct cgx *cgx = lmac->cgx;
        struct device *dev;
        int err = 0;
        u64 cmd;

        /* Ensure no other command is in progress */
        err = mutex_lock_interruptible(&lmac->cmd_lock);
        if (err)
                return err;

        /* Ensure command register is free */
        cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
        if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
                err = -EBUSY;
                goto unlock;
        }

        /* Update ownership in command request */
        req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

        /* Mark this lmac as pending, before we start */
        lmac->cmd_pend = true;

        /* Start command in hardware */
        cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

        /* Ensure command is completed without errors */
        if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
                                msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
                dev = &cgx->pdev->dev;
                dev_err(dev, "cgx port %d:%d cmd timeout\n",
                        cgx->cgx_id, lmac->lmac_id);
                err = -EIO;
                goto unlock;
        }

        /* We have a valid command response */
        smp_rmb(); /* Ensure the latest updates are visible */
        *resp = lmac->resp;

unlock:
        mutex_unlock(&lmac->cmd_lock);

        return err;
}

static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
                                      struct cgx *cgx, int lmac_id)
{
        struct lmac *lmac;
        int err;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        err = cgx_fwi_cmd_send(req, resp, lmac);

        /* Check for valid response */
        if (!err) {
                if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
                        return -EIO;
                else
                        return 0;
        }

        return err;
}

static inline void cgx_link_usertable_init(void)
{
        cgx_speed_mbps[CGX_LINK_NONE] = 0;
        cgx_speed_mbps[CGX_LINK_10M] = 10;
        cgx_speed_mbps[CGX_LINK_100M] = 100;
        cgx_speed_mbps[CGX_LINK_1G] = 1000;
        cgx_speed_mbps[CGX_LINK_2HG] = 2500;
        cgx_speed_mbps[CGX_LINK_5G] = 5000;
        cgx_speed_mbps[CGX_LINK_10G] = 10000;
        cgx_speed_mbps[CGX_LINK_20G] = 20000;
        cgx_speed_mbps[CGX_LINK_25G] = 25000;
        cgx_speed_mbps[CGX_LINK_40G] = 40000;
        cgx_speed_mbps[CGX_LINK_50G] = 50000;
        cgx_speed_mbps[CGX_LINK_100G] = 100000;

        cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
        cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
        cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
        cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
        cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
        cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
        cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
        cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
        cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
        cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}

static inline void link_status_user_format(u64 lstat,
                                           struct cgx_link_user_info *linfo,
                                           struct cgx *cgx, u8 lmac_id)
{
        char *lmac_string;

        linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
        linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
        linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
        linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
        lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
        strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
                                           struct lmac *lmac)
{
        struct cgx_link_user_info *linfo;
        struct cgx *cgx = lmac->cgx;
        struct cgx_link_event event;
        struct device *dev;
        int err_type;

        dev = &cgx->pdev->dev;

        link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
        err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

        event.cgx_id = cgx->cgx_id;
        event.lmac_id = lmac->lmac_id;

        /* Update the local copy of the link status */
        lmac->link_info = event.link_uinfo;
        linfo = &lmac->link_info;

        /* Ensure the callback is not unregistered while it is in use */
        spin_lock(&lmac->event_cb_lock);

        if (!lmac->event_cb.notify_link_chg) {
                dev_dbg(dev, "cgx port %d:%d Link change handler null\n",
                        cgx->cgx_id, lmac->lmac_id);
                if (err_type != CGX_ERR_NONE) {
                        dev_err(dev, "cgx port %d:%d Link error %d\n",
                                cgx->cgx_id, lmac->lmac_id, err_type);
                }
                dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
                         cgx->cgx_id, lmac->lmac_id,
                         linfo->link_up ? "UP" : "DOWN", linfo->speed);
                goto err;
        }

        if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
                dev_err(dev, "event notification failure\n");
err:
        spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
        u8 id;

        id = FIELD_GET(EVTREG_ID, event);
        if (id == CGX_CMD_LINK_BRING_UP ||
            id == CGX_CMD_LINK_BRING_DOWN)
                return true;
        else
                return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
        if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
                return true;
        else
                return false;
}

static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
                                           struct cgx *cgx)
{
        u64 req = 0;
        u64 resp;
        int err;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
        if (!err)
                *prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);

        return err;
}

static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
                                             struct cgx *cgx)
{
        u64 req = 0;
        u64 resp;
        int err;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
        if (!err)
                *prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);

        return err;
}

int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
{
        struct cgx *cgx_dev;
        int err;

        if (!addr || !size)
                return -EINVAL;

        cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
        if (!cgx_dev)
                return -ENXIO;

        err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
        if (err)
                return -EIO;

        err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
        if (err)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL(cgx_get_mkex_prfl_info);

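/* Firmware interrupt handler. The same vector carries both command responses
 * and asynchronous events: the event register is decoded here, command
 * responses are handed back to the waiting issuer, link changes are
 * propagated, and the event/interrupt registers are acked so firmware can
 * post the next event.
 */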
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
        struct lmac *lmac = data;
        struct cgx *cgx;
        u64 event;

        cgx = lmac->cgx;

        event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

        if (!FIELD_GET(EVTREG_ACK, event))
                return IRQ_NONE;

        switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
        case CGX_EVT_CMD_RESP:
                /* Copy the response. Since only one command is active at a
                 * time, there is no way a response can get overwritten.
                 */
                lmac->resp = event;
                /* Ensure response is updated before waking up the waiter */
                smp_wmb();

                /* Link bring up/down commands carry the new link status in
                 * their response, so run the link change handler for them.
                 */
                if (cgx_cmdresp_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);

                /* Release the thread waiting for completion */
                lmac->cmd_pend = false;
                wake_up_interruptible(&lmac->wq_cmd_cmplt);
                break;
        case CGX_EVT_ASYNC:
                if (cgx_event_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);
                break;
        }

        /* Firmware posts the next event or command response only after the
         * current status is acked, so clear the event register and ack the
         * interrupt as well.
         */
        cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
        cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

        return IRQ_HANDLED;
}
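/* Register a link change notification callback for an LMAC. The callback is
 * invoked from the firmware interrupt path with event_cb_lock held, so it
 * must not sleep.
 */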
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        lmac->event_cb = *cb;

        return 0;
}
EXPORT_SYMBOL(cgx_lmac_evh_register);

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
        struct lmac *lmac;
        unsigned long flags;
        struct cgx *cgx = cgxd;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        spin_lock_irqsave(&lmac->event_cb_lock, flags);
        lmac->event_cb.notify_link_chg = NULL;
        lmac->event_cb.data = NULL;
        spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

        return 0;
}
EXPORT_SYMBOL(cgx_lmac_evh_unregister);

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
        u64 req = 0;
        u64 resp;

        if (enable)
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
        else
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);

        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
        u64 req = 0;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
        return cgx_fwi_cmd_generic(req, resp, cgx, 0);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
        struct device *dev = &cgx->pdev->dev;
        int major_ver, minor_ver;
        u64 resp;
        int err;

        if (!cgx->lmac_count)
                return 0;

        err = cgx_fwi_read_version(&resp, cgx);
        if (err)
                return err;

        major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
        minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
        dev_dbg(dev, "Firmware command interface version = %d.%d\n",
                major_ver, minor_ver);
        if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
            minor_ver != CGX_FIRMWARE_MINOR_VER)
                return -EIO;
        else
                return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
        struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
        struct device *dev = &cgx->pdev->dev;
        int i, err;

        /* Do link up for all the enabled LMACs */
        for (i = 0; i < cgx->lmac_count; i++) {
                err = cgx_fwi_link_change(cgx, i, true);
                if (err)
                        dev_info(dev, "cgx port %d:%d Link up command failed\n",
                                 cgx->cgx_id, i);
        }
}

int cgx_lmac_linkup_start(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

        return 0;
}
EXPORT_SYMBOL(cgx_lmac_linkup_start);

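/* Discover the LMACs present on this CGX, allocate per-LMAC state, request
 * and enable the firmware interrupt for each, then verify that the firmware
 * command interface version matches what this driver expects.
 */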
static int cgx_lmac_init(struct cgx *cgx)
{
        struct lmac *lmac;
        int i, err;

        cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
        if (cgx->lmac_count > MAX_LMAC_PER_CGX)
                cgx->lmac_count = MAX_LMAC_PER_CGX;

        for (i = 0; i < cgx->lmac_count; i++) {
                lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
                if (!lmac)
                        return -ENOMEM;
                lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
                if (!lmac->name)
                        return -ENOMEM;
                sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
                lmac->lmac_id = i;
                lmac->cgx = cgx;
                init_waitqueue_head(&lmac->wq_cmd_cmplt);
                mutex_init(&lmac->cmd_lock);
                spin_lock_init(&lmac->event_cb_lock);
                err = request_irq(pci_irq_vector(cgx->pdev,
                                                 CGX_LMAC_FWI + i * 9),
                                  cgx_fwi_event_handler, 0, lmac->name, lmac);
                if (err)
                        return err;

                /* Enable interrupt */
                cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
                          FW_CGX_INT);

                /* Add reference */
                cgx->lmac_idmap[i] = lmac;
        }

        return cgx_lmac_verify_fwi_version(cgx);
}

static int cgx_lmac_exit(struct cgx *cgx)
{
        struct lmac *lmac;
        int i;

        if (cgx->cgx_cmd_workq) {
                flush_workqueue(cgx->cgx_cmd_workq);
                destroy_workqueue(cgx->cgx_cmd_workq);
                cgx->cgx_cmd_workq = NULL;
        }

        /* Free all allocated LMAC resources */
        for (i = 0; i < cgx->lmac_count; i++) {
                lmac = cgx->lmac_idmap[i];
                if (!lmac)
                        continue;
                free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
                kfree(lmac->name);
                kfree(lmac);
        }

        return 0;
}

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct cgx *cgx;
        int err, nvec;

        cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
        if (!cgx)
                return -ENOMEM;
        cgx->pdev = pdev;

        pci_set_drvdata(pdev, cgx);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        /* Map CGX configuration registers */
        cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!cgx->reg_base) {
                dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
                err = -ENOMEM;
                goto err_release_regions;
        }

        nvec = CGX_NVEC;
        err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (err < 0 || err != nvec) {
                dev_err(dev, "Request for %d msix vectors failed, err %d\n",
                        nvec, err);
                goto err_release_regions;
        }

        cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
                        & CGX_ID_MASK;

        /* Init workqueue for processing link up requests */
        INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
        cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
        if (!cgx->cgx_cmd_workq) {
                dev_err(dev, "alloc workqueue failed for cgx cmd\n");
                err = -ENOMEM;
                goto err_free_irq_vectors;
        }

        list_add(&cgx->cgx_list, &cgx_list);

        cgx_link_usertable_init();

        err = cgx_lmac_init(cgx);
        if (err)
                goto err_release_lmac;

        return 0;

err_release_lmac:
        cgx_lmac_exit(cgx);
        list_del(&cgx->cgx_list);
err_free_irq_vectors:
        pci_free_irq_vectors(pdev);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}


static void cgx_remove(struct pci_dev *pdev)
{
        struct cgx *cgx = pci_get_drvdata(pdev);

        cgx_lmac_exit(cgx);
        list_del(&cgx->cgx_list);
        pci_free_irq_vectors(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
        .name = DRV_NAME,
        .id_table = cgx_id_table,
        .probe = cgx_probe,
        .remove = cgx_remove,
};