#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/crc32.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>

#include "pcie_priv.h"
#include "topaz_pcie_regs.h"
#include "topaz_pcie_ipc.h"
#include "qtn_hw_ids.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"

#define TOPAZ_TX_BD_SIZE_DEFAULT	128

struct qtnf_topaz_tx_bd {
        __le32 addr;
        __le32 info;
} __packed;

struct qtnf_topaz_rx_bd {
        __le32 addr;
        __le32 info;
} __packed;

struct qtnf_extra_bd_params {
        __le32 param1;
        __le32 param2;
        __le32 param3;
        __le32 param4;
} __packed;

#define QTNF_BD_PARAM_OFFSET(n)	offsetof(struct qtnf_extra_bd_params, param##n)

struct vmac_pkt_info {
        __le32 addr;
        __le32 info;
};

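/*
 * Board Data Area (BDA): region in EP memory shared between the host (RC)
 * and the wireless card (EP).  It carries the boot handshake state,
 * descriptor ring bases and sizes, TX packet requests and the shared
 * memory IPC regions.
 */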
struct qtnf_topaz_bda {
        __le16 bda_len;
        __le16 bda_version;
        __le32 bda_bootstate;
        __le32 bda_dma_mask;
        __le32 bda_dma_offset;
        __le32 bda_flags;
        __le32 bda_img;
        __le32 bda_img_size;
        __le32 bda_ep2h_irqstatus;
        __le32 bda_h2ep_irqstatus;
        __le32 bda_msi_addr;
        u8 reserved1[56];
        __le32 bda_flashsz;
        u8 bda_boardname[PCIE_BDA_NAMELEN];
        __le32 bda_pci_pre_status;
        __le32 bda_pci_endian;
        __le32 bda_pci_post_status;
        __le32 bda_h2ep_txd_budget;
        __le32 bda_ep2h_txd_budget;
        __le32 bda_rc_rx_bd_base;
        __le32 bda_rc_rx_bd_num;
        __le32 bda_rc_tx_bd_base;
        __le32 bda_rc_tx_bd_num;
        u8 bda_ep_link_state;
        u8 bda_rc_link_state;
        u8 bda_rc_msi_enabled;
        u8 reserved2;
        __le32 bda_ep_next_pkt;
        struct vmac_pkt_info request[QTN_PCIE_RC_TX_QUEUE_LEN];
        struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096);
        struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096);
} __packed;

struct qtnf_pcie_topaz_state {
        struct qtnf_pcie_bus_priv base;
        struct qtnf_topaz_bda __iomem *bda;

        dma_addr_t dma_msi_dummy;
        u32 dma_msi_imwr;

        struct qtnf_topaz_tx_bd *tx_bd_vbase;
        struct qtnf_topaz_rx_bd *rx_bd_vbase;

        __le32 __iomem *ep_next_rx_pkt;
        __le32 __iomem *txqueue_wake;
        __le32 __iomem *ep_pmstate;

        unsigned long rx_pkt_count;
};

static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
{
        void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
        u32 cfg;

        cfg = readl(reg);
        cfg &= ~TOPAZ_ASSERT_INTX;
        qtnf_non_posted_write(cfg, reg);
}

static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
{
        void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
        u32 cfg = readl(reg);

        return !!(cfg & TOPAZ_ASSERT_INTX);
}

static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
{
        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ),
               TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
        msleep(QTN_EP_RESET_WAIT_MS);
        pci_restore_state(ts->base.pdev);
}

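/*
 * RX notifications are generated by the PCIe DMA engine writing to the
 * address programmed in its write-done IMWR register.  setup_rx_irqs()
 * latches the MSI address configured there; enable/disable then simply
 * switch the IMWR target between the real MSI address and a dummy DMA
 * location in host memory.
 */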
static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
        void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

        ts->dma_msi_imwr = readl(reg);
}

static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
        void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

        qtnf_non_posted_write(ts->dma_msi_imwr, reg);
}

static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
        void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

        qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
}

static void qtnf_topaz_ipc_gen_ep_int(void *arg)
{
        struct qtnf_pcie_topaz_state *ts = arg;

        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_CTRL_IRQ),
               TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar));
}

static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
        u32 s = readl(reg);

        return (s == state);
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
        qtnf_non_posted_write(state, reg);
}

static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
        u32 timeout = 0;

        while (!qtnf_is_state(reg, state)) {
                usleep_range(1000, 1200);
                if (++timeout > delay_in_ms)
                        return -1;
        }

        return 0;
}

static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
                                struct qtnf_topaz_bda __iomem *bda)
{
        struct qtnf_extra_bd_params __iomem *extra_params;
        struct qtnf_pcie_bus_priv *priv = &ts->base;
        dma_addr_t paddr;
        void *vaddr;
        int len;
        int i;

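        /* one coherent DMA allocation holds the TX ring, the RX ring and
         * the extra host/EP parameter words, in that order
         */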
        len = priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd) +
              priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd) +
              sizeof(struct qtnf_extra_bd_params);

        vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        memset(vaddr, 0, len);

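        /* TX descriptor ring */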
        ts->tx_bd_vbase = vaddr;
        qtnf_non_posted_write(paddr, &bda->bda_rc_tx_bd_base);

        for (i = 0; i < priv->tx_bd_num; i++)
                ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY);

        pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

        priv->tx_bd_r_index = 0;
        priv->tx_bd_w_index = 0;

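        /* RX descriptor ring */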
        vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num;
        paddr += priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd);

        ts->rx_bd_vbase = vaddr;
        qtnf_non_posted_write(paddr, &bda->bda_rc_rx_bd_base);

        pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

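        /* extra host/EP parameters follow the two rings */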
        vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num;
        paddr += priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd);

        extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr;

        ts->ep_next_rx_pkt = &extra_params->param1;
        qtnf_non_posted_write(paddr + QTNF_BD_PARAM_OFFSET(1),
                              &bda->bda_ep_next_pkt);
        ts->txqueue_wake = &extra_params->param2;
        ts->ep_pmstate = &extra_params->param3;
        ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4);

        return 0;
}

static int
topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
{
        struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
        struct sk_buff *skb;
        dma_addr_t paddr;

        skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
        if (!skb) {
                ts->base.rx_skb[index] = NULL;
                return -ENOMEM;
        }

        ts->base.rx_skb[index] = skb;

        paddr = pci_map_single(ts->base.pdev, skb->data,
                               SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(ts->base.pdev, paddr)) {
                pr_err("skb mapping error: %pad\n", &paddr);
                return -ENOMEM;
        }

        rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
        rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap);

        ts->base.rx_bd_w_index = index;

        return 0;
}

static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
{
        u16 i;
        int ret = 0;

        memset(ts->rx_bd_vbase, 0x0,
               ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));

        for (i = 0; i < ts->base.rx_bd_num; i++) {
                ret = topaz_skb2rbd_attach(ts, i, 0);
                if (ret)
                        break;
        }

        ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
                                        cpu_to_le32(QTN_BD_WRAP);

        return ret;
}

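/* no locking here: all RX/TX activity must have ceased before teardown */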
static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
{
        struct qtnf_pcie_bus_priv *priv = &ts->base;
        struct qtnf_topaz_rx_bd *rxbd;
        struct qtnf_topaz_tx_bd *txbd;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int i;

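        /* free and unmap RX buffers */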
        for (i = 0; i < priv->rx_bd_num; i++) {
                if (priv->rx_skb && priv->rx_skb[i]) {
                        rxbd = &ts->rx_bd_vbase[i];
                        skb = priv->rx_skb[i];
                        paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
                        pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        priv->rx_skb[i] = NULL;
                        rxbd->addr = 0;
                        rxbd->info = 0;
                }
        }

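        /* free and unmap TX buffers */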
        for (i = 0; i < priv->tx_bd_num; i++) {
                if (priv->tx_skb && priv->tx_skb[i]) {
                        txbd = &ts->tx_bd_vbase[i];
                        skb = priv->tx_skb[i];
                        paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
                        pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
                                         PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(skb);
                        priv->tx_skb[i] = NULL;
                        txbd->addr = 0;
                        txbd->info = 0;
                }
        }
}

static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
                                     unsigned int tx_bd_size)
{
        struct qtnf_topaz_bda __iomem *bda = ts->bda;
        struct qtnf_pcie_bus_priv *priv = &ts->base;
        int ret;

        if (tx_bd_size == 0)
                tx_bd_size = TOPAZ_TX_BD_SIZE_DEFAULT;

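        /* the TX ring cannot be larger than the request array in the BDA */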
        if (tx_bd_size > QTN_PCIE_RC_TX_QUEUE_LEN) {
                pr_warn("TX BD queue cannot exceed %d\n",
                        QTN_PCIE_RC_TX_QUEUE_LEN);
                tx_bd_size = QTN_PCIE_RC_TX_QUEUE_LEN;
        }

        priv->tx_bd_num = tx_bd_size;
        qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);
        qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);

        priv->rx_bd_w_index = 0;
        priv->rx_bd_r_index = 0;

        ret = qtnf_pcie_alloc_skb_array(priv);
        if (ret) {
                pr_err("failed to allocate skb array\n");
                return ret;
        }

        ret = topaz_alloc_bd_table(ts, bda);
        if (ret) {
                pr_err("failed to allocate bd table\n");
                return ret;
        }

        ret = topaz_alloc_rx_buffers(ts);
        if (ret) {
                pr_err("failed to allocate rx buffers\n");
                return ret;
        }

        return ret;
}

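/*
 * Release TX buffers already consumed by the EP.  The EP advances the
 * shared ep_next_rx_pkt index; every descriptor between the host read
 * index and that value can be unmapped and freed.
 */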
static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
{
        struct qtnf_pcie_bus_priv *priv = &ts->base;
        struct qtnf_topaz_tx_bd *txbd;
        struct sk_buff *skb;
        unsigned long flags;
        dma_addr_t paddr;
        u32 tx_done_index;
        int count = 0;
        int i;

        spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

        tx_done_index = readl(ts->ep_next_rx_pkt);
        i = priv->tx_bd_r_index;

        if (CIRC_CNT(priv->tx_bd_w_index, tx_done_index, priv->tx_bd_num))
                writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
                       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

        while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
                skb = priv->tx_skb[i];

                if (likely(skb)) {
                        txbd = &ts->tx_bd_vbase[i];
                        paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
                        pci_unmap_single(priv->pdev, paddr, skb->len,
                                         PCI_DMA_TODEVICE);

                        if (skb->dev) {
                                qtnf_update_tx_stats(skb->dev, skb);
                                if (unlikely(priv->tx_stopped)) {
                                        qtnf_wake_all_queues(skb->dev);
                                        priv->tx_stopped = 0;
                                }
                        }

                        dev_kfree_skb_any(skb);
                }

                priv->tx_skb[i] = NULL;
                count++;

                if (++i >= priv->tx_bd_num)
                        i = 0;
        }

        priv->tx_reclaim_done += count;
        priv->tx_reclaim_req++;
        priv->tx_bd_r_index = i;

        spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}

static void qtnf_try_stop_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
        struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);

        if (ndev) {
                netif_tx_stop_all_queues(ndev);
                ts->base.tx_stopped = 1;
        }

        writel(0x0, ts->txqueue_wake);

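        /* sync up the tx queue status before kicking the EP */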
        dma_wmb();

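        /* let the EP know the host TX queue is stopped */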
        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
               TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

        tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_try_wake_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
        int ready;

        ready = readl(ts->txqueue_wake);
        if (ready) {
                netif_wake_queue(ndev);
        } else {
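                /* EP has not re-armed the queue yet: repeat the tx-stop notification */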
                writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
                       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
        }
}

static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
{
        struct qtnf_pcie_bus_priv *priv = &ts->base;

        if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
                        priv->tx_bd_num)) {
                qtnf_topaz_data_tx_reclaim(ts);

                if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
                                priv->tx_bd_num)) {
                        priv->tx_full_count++;
                        return 0;
                }
        }

        return 1;
}

static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
        struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
        struct qtnf_pcie_bus_priv *priv = &ts->base;
        struct qtnf_topaz_bda __iomem *bda = ts->bda;
        struct qtnf_topaz_tx_bd *txbd;
        dma_addr_t skb_paddr;
        unsigned long flags;
        int ret = 0;
        int len;
        int i;

        spin_lock_irqsave(&priv->tx_lock, flags);

        if (!qtnf_tx_queue_ready(ts)) {
                qtnf_try_stop_xmit(bus, skb->dev);
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        i = priv->tx_bd_w_index;
        priv->tx_skb[i] = skb;
        len = skb->len;

        skb_paddr = pci_map_single(priv->pdev, skb->data,
                                   skb->len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
                ret = -ENOMEM;
                goto tx_done;
        }

        txbd = &ts->tx_bd_vbase[i];
        txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));

        writel(QTN_HOST_LO32(skb_paddr), &bda->request[i].addr);
        writel(len | QTN_PCIE_TX_VALID_PKT, &bda->request[i].info);

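        /* make sure the descriptor and request updates are visible to the EP */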
        dma_wmb();

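        /* kick the EP to start processing the new TX request */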
        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
               TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

        if (++i >= priv->tx_bd_num)
                i = 0;

        priv->tx_bd_w_index = i;

tx_done:
        if (ret) {
                if (skb->dev)
                        skb->dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
        }

        priv->tx_done_count++;
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        qtnf_topaz_data_tx_reclaim(ts);

        return NETDEV_TX_OK;
}

static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
{
        struct qtnf_bus *bus = (struct qtnf_bus *)data;
        struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
        struct qtnf_pcie_bus_priv *priv = &ts->base;

        if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
                return IRQ_NONE;

        priv->pcie_irq_count++;

        qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
        qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

        if (napi_schedule_prep(&bus->mux_napi)) {
                disable_rx_irqs(ts);
                __napi_schedule(&bus->mux_napi);
        }

        tasklet_hi_schedule(&priv->reclaim_tq);

        if (!priv->msi_enabled)
                qtnf_deassert_intx(ts);

        return IRQ_HANDLED;
}

static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
{
        u16 index = ts->base.rx_bd_r_index;
        struct qtnf_topaz_rx_bd *rxbd;
        u32 descw;

        rxbd = &ts->rx_bd_vbase[index];
        descw = le32_to_cpu(rxbd->info);

        if (descw & QTN_BD_EMPTY)
                return 0;

        return 1;
}

static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
{
        struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
        struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
        struct qtnf_pcie_bus_priv *priv = &ts->base;
        struct net_device *ndev = NULL;
        struct sk_buff *skb = NULL;
        int processed = 0;
        struct qtnf_topaz_rx_bd *rxbd;
        dma_addr_t skb_paddr;
        int consume;
        u32 descw;
        u32 poffset;
        u32 psize;
        u16 r_idx;
        u16 w_idx;
        int ret;

        while (processed < budget) {
                if (!qtnf_rx_data_ready(ts))
                        goto rx_out;

                r_idx = priv->rx_bd_r_index;
                rxbd = &ts->rx_bd_vbase[r_idx];
                descw = le32_to_cpu(rxbd->info);

                skb = priv->rx_skb[r_idx];
                poffset = QTN_GET_OFFSET(descw);
                psize = QTN_GET_LEN(descw);
                consume = 1;

                if (descw & QTN_BD_EMPTY) {
                        pr_warn("skip invalid rxbd[%d]\n", r_idx);
                        consume = 0;
                }

                if (!skb) {
                        pr_warn("skip missing rx_skb[%d]\n", r_idx);
                        consume = 0;
                }

                if (skb && (skb_tailroom(skb) < psize)) {
                        pr_err("skip packet with invalid length: %u > %u\n",
                               psize, skb_tailroom(skb));
                        consume = 0;
                }

                if (skb) {
                        skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
                        pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
                                         PCI_DMA_FROMDEVICE);
                }

                if (consume) {
                        skb_reserve(skb, poffset);
                        skb_put(skb, psize);
                        ndev = qtnf_classify_skb(bus, skb);
                        if (likely(ndev)) {
                                qtnf_update_rx_stats(ndev, skb);
                                skb->protocol = eth_type_trans(skb, ndev);
                                netif_receive_skb(skb);
                        } else {
                                pr_debug("drop untagged skb\n");
                                bus->mux_dev.stats.rx_dropped++;
                                dev_kfree_skb_any(skb);
                        }
                } else {
                        if (skb) {
                                bus->mux_dev.stats.rx_dropped++;
                                dev_kfree_skb_any(skb);
                        }
                }

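                /* notify the EP about consumed packets once per (RX_DONE_INTR_MSK + 1) frames */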
                if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0)
                        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ),
                               TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

                priv->rx_skb[r_idx] = NULL;
                if (++r_idx >= priv->rx_bd_num)
                        r_idx = 0;

                priv->rx_bd_r_index = r_idx;

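                /* refill the RX ring with freshly allocated buffers */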
                w_idx = priv->rx_bd_w_index;
                while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
                                  priv->rx_bd_num) > 0) {
                        if (++w_idx >= priv->rx_bd_num)
                                w_idx = 0;

                        ret = topaz_skb2rbd_attach(ts, w_idx,
                                                   descw & QTN_BD_WRAP);
                        if (ret) {
                                pr_err("failed to allocate new rx_skb[%d]\n",
                                       w_idx);
                                break;
                        }
                }

                processed++;
        }

rx_out:
        if (processed < budget) {
                napi_complete(napi);
                enable_rx_irqs(ts);
        }

        return processed;
}

static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

        qtnf_try_wake_xmit(bus, ndev);
        tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

        napi_enable(&bus->mux_napi);
        enable_rx_irqs(ts);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

        disable_rx_irqs(ts);
        napi_disable(&bus->mux_napi);
}

static const struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
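        /* control path methods */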
        .control_tx     = qtnf_pcie_control_tx,

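        /* data path methods */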
        .data_tx          = qtnf_pcie_data_tx,
        .data_tx_timeout  = qtnf_pcie_data_tx_timeout,
        .data_rx_start    = qtnf_pcie_data_rx_start,
        .data_rx_stop     = qtnf_pcie_data_rx_stop,
};

static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
        struct qtnf_bus *bus = dev_get_drvdata(s->private);
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

        seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count);

        return 0;
}

static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
{
        struct qtnf_bus *bus = dev_get_drvdata(s->private);
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
        struct qtnf_pcie_bus_priv *priv = &ts->base;
        u32 tx_done_index = readl(ts->ep_next_rx_pkt);

        seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
        seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
        seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
        seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

        seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
        seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
        seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);

        seq_printf(s, "tx host queue len(%u)\n",
                   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
                            priv->tx_bd_num));
        seq_printf(s, "tx reclaim queue len(%u)\n",
                   CIRC_CNT(tx_done_index, priv->tx_bd_r_index,
                            priv->tx_bd_num));
        seq_printf(s, "tx card queue len(%u)\n",
                   CIRC_CNT(priv->tx_bd_w_index, tx_done_index,
                            priv->tx_bd_num));

        seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
        seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
        seq_printf(s, "rx alloc queue len(%u)\n",
                   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
                              priv->rx_bd_num));

        return 0;
}

static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts)
{
        struct qtnf_topaz_bda __iomem *bda = ts->bda;
        u32 offset = readl(&bda->bda_dma_offset);

        if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR)
                return;

        writel(0x0, &bda->bda_dma_offset);
}

static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts)
{
        struct qtnf_topaz_bda __iomem *bda = ts->bda;
        u32 timeout = 0;
        u32 endian;
        int ret = 0;

        writel(QTN_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);

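        /* flush the endian marker before flagging it as valid */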
        dma_wmb();

        writel(QTN_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);

        while (readl(&bda->bda_pci_post_status) !=
               QTN_PCI_ENDIAN_VALID_STATUS) {
                usleep_range(1000, 1200);
                if (++timeout > QTN_FW_DL_TIMEOUT_MS) {
                        pr_err("card endianness detection timed out\n");
                        ret = -ETIMEDOUT;
                        goto endian_out;
                }
        }

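        /* do not read the result before the EP reports completion */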
        dma_rmb();

        endian = readl(&bda->bda_pci_endian);
        WARN(endian != QTN_PCI_LITTLE_ENDIAN,
             "%s: unexpected card endianness", __func__);

endian_out:
        writel(0, &bda->bda_pci_pre_status);
        writel(0, &bda->bda_pci_post_status);
        writel(0, &bda->bda_pci_endian);

        return ret;
}

static int qtnf_pre_init_ep(struct qtnf_bus *bus)
{
        struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
        struct qtnf_topaz_bda __iomem *bda = ts->bda;
        u32 flags;
        int ret;

        ret = qtnf_pcie_endian_detect(ts);
        if (ret < 0) {
                pr_err("failed to detect card endianness\n");
                return ret;
        }

        writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
        qtnf_reset_dma_offset(ts);

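        /* advertise the attached host driver and the selected boot mode */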
        flags = readl(&bda->bda_flags) | QTN_BDA_HOST_QLINK_DRV;

        if (ts->base.flashboot)
                flags |= QTN_BDA_FLASH_BOOT;
        else
                flags &= ~QTN_BDA_FLASH_BOOT;

        writel(flags, &bda->bda_flags);

        qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
        if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
                            QTN_FW_DL_TIMEOUT_MS)) {
                pr_err("card is not ready to boot...\n");
                return -ETIMEDOUT;
        }

        return ret;
}

static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts)
{
        struct pci_dev *pdev = ts->base.pdev;

        setup_rx_irqs(ts);
        disable_rx_irqs(ts);

        if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE,
                            QTN_FW_QLINK_TIMEOUT_MS))
                return -ETIMEDOUT;

        enable_irq(pdev->irq);
        return 0;
}

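/*
 * Upload a firmware image to the EP in DMA-coherent chunks.  Each chunk's
 * bus address and size are written into the BDA, then the host waits for
 * the EP to acknowledge the block before sending the next one.
 */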
static int
qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size)
{
        struct qtnf_topaz_bda __iomem *bda = ts->bda;
        struct pci_dev *pdev = ts->base.pdev;
        u32 remaining = fw_size;
        u8 *curr = (u8 *)fw;
        u32 blksize;
        u32 nblocks;
        u32 offset;
        u32 count;
        u32 size;
        dma_addr_t paddr;
        void *data;
        int ret = 0;

        pr_debug("FW upload started: fw_addr = 0x%p, size=%d\n", fw, fw_size);

        blksize = ts->base.fw_blksize;

        if (blksize < PAGE_SIZE)
                blksize = PAGE_SIZE;

        while (blksize >= PAGE_SIZE) {
                pr_debug("allocating %u bytes to upload FW\n", blksize);
                data = dma_alloc_coherent(&pdev->dev, blksize,
                                          &paddr, GFP_KERNEL);
                if (data)
                        break;
                blksize /= 2;
        }

        if (!data) {
                pr_err("failed to allocate DMA buffer for FW upload\n");
                ret = -ENOMEM;
                goto fw_load_out;
        }

        nblocks = NBLOCKS(fw_size, blksize);
        offset = readl(&bda->bda_dma_offset);

        qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD);
        if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY,
                            QTN_FW_DL_TIMEOUT_MS)) {
                pr_err("card is not ready to download FW\n");
                ret = -ETIMEDOUT;
                goto fw_load_map;
        }

        for (count = 0; count < nblocks; count++) {
                size = (remaining > blksize) ? blksize : remaining;

                memcpy(data, curr, size);
                qtnf_non_posted_write(paddr + offset, &bda->bda_img);
                qtnf_non_posted_write(size, &bda->bda_img_size);

                pr_debug("chunk[%u] VA[0x%p] PA[%pad] sz[%u]\n",
                         count, (void *)curr, &paddr, size);

                qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
                if (qtnf_poll_state(&ts->bda->bda_bootstate,
                                    QTN_BDA_FW_BLOCK_DONE,
                                    QTN_FW_DL_TIMEOUT_MS)) {
                        pr_err("confirmation for block #%d timed out\n", count);
                        ret = -ETIMEDOUT;
                        goto fw_load_map;
                }

                remaining = (remaining < size) ? remaining : (remaining - size);
                curr += size;
        }

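        /* a zero-length block marks the end of the image */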
        qtnf_non_posted_write(0, &bda->bda_img);
        qtnf_non_posted_write(0, &bda->bda_img_size);

        qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
        if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE,
                            QTN_FW_DL_TIMEOUT_MS)) {
                pr_err("confirmation for the last block timed out\n");
                ret = -ETIMEDOUT;
                goto fw_load_map;
        }

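        /* host side of the upload is done; wait for the EP to accept the image */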
        qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END);
        if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE,
                            QTN_FW_DL_TIMEOUT_MS)) {
                pr_err("confirmation for FW upload completion timed out\n");
                ret = -ETIMEDOUT;
                goto fw_load_map;
        }

        pr_debug("FW upload completed: totally sent %d blocks\n", count);

fw_load_map:
        dma_free_coherent(&pdev->dev, blksize, data, paddr);

fw_load_out:
        return ret;
}

static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts,
                                const char *fwname)
{
        const struct firmware *fw;
        struct pci_dev *pdev = ts->base.pdev;
        int ret;

        if (qtnf_poll_state(&ts->bda->bda_bootstate,
                            QTN_BDA_FW_LOAD_RDY,
                            QTN_FW_DL_TIMEOUT_MS)) {
                pr_err("%s: card is not ready\n", fwname);
                return -1;
        }

        pr_info("starting firmware upload: %s\n", fwname);

        ret = request_firmware(&fw, fwname, &pdev->dev);
        if (ret < 0) {
                pr_err("%s: request_firmware error %d\n", fwname, ret);
                return -1;
        }

        ret = qtnf_ep_fw_load(ts, fw->data, fw->size);
        release_firmware(fw);

        if (ret)
                pr_err("%s: FW upload error\n", fwname);

        return ret;
}

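/*
 * Deferred firmware bringup: optionally push the bootloader first, then
 * either boot from flash or upload the firmware image, driving the BDA
 * bootstate handshake at every step.
 */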
static void qtnf_topaz_fw_work_handler(struct work_struct *work)
{
        struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
        struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
        int ret;
        int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;

        qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);

        if (bootloader_needed) {
                ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME);
                if (ret)
                        goto fw_load_exit;

                ret = qtnf_pre_init_ep(bus);
                if (ret)
                        goto fw_load_exit;

                qtnf_set_state(&ts->bda->bda_bootstate,
                               QTN_BDA_FW_TARGET_BOOT);
        }

        if (ts->base.flashboot) {
                pr_info("booting firmware from flash\n");

                ret = qtnf_poll_state(&ts->bda->bda_bootstate,
                                      QTN_BDA_FW_FLASH_BOOT,
                                      QTN_FW_DL_TIMEOUT_MS);
                if (ret)
                        goto fw_load_exit;
        } else {
                ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME);
                if (ret)
                        goto fw_load_exit;

                qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START);
                ret = qtnf_poll_state(&ts->bda->bda_bootstate,
                                      QTN_BDA_FW_CONFIG,
                                      QTN_FW_QLINK_TIMEOUT_MS);
                if (ret) {
                        pr_err("FW bringup timed out\n");
                        goto fw_load_exit;
                }

                qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN);
                ret = qtnf_poll_state(&ts->bda->bda_bootstate,
                                      QTN_BDA_FW_RUNNING,
                                      QTN_FW_QLINK_TIMEOUT_MS);
                if (ret) {
                        pr_err("card bringup timed out\n");
                        goto fw_load_exit;
                }
        }

        pr_info("firmware is up and running\n");

        ret = qtnf_post_init_ep(ts);
        if (ret)
                pr_err("FW runtime failure\n");

fw_load_exit:
        qtnf_pcie_fw_boot_done(bus, ret ? false : true);

        if (ret == 0) {
                qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
                qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
        }
}

static void qtnf_reclaim_tasklet_fn(unsigned long data)
{
        struct qtnf_pcie_topaz_state *ts = (void *)data;

        qtnf_topaz_data_tx_reclaim(ts);
}

static u64 qtnf_topaz_dma_mask_get(void)
{
        return DMA_BIT_MASK(32);
}

static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
{
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
        struct pci_dev *pdev = ts->base.pdev;
        struct qtnf_shm_ipc_int ipc_int;
        unsigned long irqflags;
        int ret;

        bus->bus_ops = &qtnf_pcie_topaz_bus_ops;
        INIT_WORK(&bus->fw_work, qtnf_topaz_fw_work_handler);
        ts->bda = ts->base.epmem_bar;

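        /* legacy INTx may be shared with other devices, MSI is exclusive */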
        if (ts->base.msi_enabled)
                irqflags = IRQF_NOBALANCING;
        else
                irqflags = IRQF_NOBALANCING | IRQF_SHARED;

        ret = devm_request_irq(&pdev->dev, pdev->irq,
                               &qtnf_pcie_topaz_interrupt,
                               irqflags, "qtnf_topaz_irq", (void *)bus);
        if (ret) {
                pr_err("failed to request pcie irq %d\n", pdev->irq);
                return ret;
        }

        disable_irq(pdev->irq);

        ret = qtnf_pre_init_ep(bus);
        if (ret) {
                pr_err("failed to init card\n");
                return ret;
        }

        ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num);
        if (ret) {
                pr_err("PCIE xfer init failed\n");
                return ret;
        }

        tasklet_init(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn,
                     (unsigned long)ts);
        netif_napi_add(&bus->mux_dev, &bus->mux_napi,
                       qtnf_topaz_rx_poll, 10);

        ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
        ipc_int.arg = ts;
        qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1,
                               &ts->bda->bda_shm_reg2, &ipc_int);

        return 0;
}

static void qtnf_pcie_topaz_remove(struct qtnf_bus *bus)
{
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

        qtnf_topaz_reset_ep(ts);
        qtnf_topaz_free_xfer_buffers(ts);
}

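/*
 * Suspend/resume notify the EP about the requested PCI power state through
 * the shared ep_pmstate word before moving the link to D3hot / back to D0.
 */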
#ifdef CONFIG_PM_SLEEP
static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus)
{
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
        struct pci_dev *pdev = ts->base.pdev;

        writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
        dma_wmb();
        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
               TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

        pci_save_state(pdev);
        pci_enable_wake(pdev, PCI_D3hot, 1);
        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}

static int qtnf_pcie_topaz_resume(struct qtnf_bus *bus)
{
        struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
        struct pci_dev *pdev = ts->base.pdev;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_enable_wake(pdev, PCI_D0, 0);

        writel((u32 __force)PCI_D0, ts->ep_pmstate);
        dma_wmb();
        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
               TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

        return 0;
}
#endif

struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev)
{
        struct qtnf_bus *bus;
        struct qtnf_pcie_topaz_state *ts;

        bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
        if (!bus)
                return NULL;

        ts = get_bus_priv(bus);
        ts->base.probe_cb = qtnf_pcie_topaz_probe;
        ts->base.remove_cb = qtnf_pcie_topaz_remove;
        ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
        ts->base.resume_cb = qtnf_pcie_topaz_resume;
        ts->base.suspend_cb = qtnf_pcie_topaz_suspend;
#endif

        return bus;
}