1
2
3
4
5#include <linux/module.h>
6#include <linux/pci.h>
7#include "main.h"
8#include "pci.h"
9#include "reg.h"
10#include "tx.h"
11#include "rx.h"
12#include "fw.h"
13#include "ps.h"
14#include "debug.h"
15
16static bool rtw_disable_msi;
17static bool rtw_pci_disable_aspm;
18module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
19module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
20MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
21MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
22
/* Per-TX-queue register offsets of the buffer-descriptor index registers.
 * Note there is no entry for RTW_TX_QUEUE_BCN: the beacon queue is kicked
 * through RTK_PCI_TXBD_BCN_WORK instead (see rtw_pci_write_data_rsvd_page).
 */
static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
};
32
33static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
34{
35 switch (queue) {
36 case RTW_TX_QUEUE_BCN:
37 return TX_DESC_QSEL_BEACON;
38 case RTW_TX_QUEUE_H2C:
39 return TX_DESC_QSEL_H2C;
40 case RTW_TX_QUEUE_MGMT:
41 return TX_DESC_QSEL_MGMT;
42 case RTW_TX_QUEUE_HI0:
43 return TX_DESC_QSEL_HIGH;
44 default:
45 return skb->priority;
46 }
47};
48
49static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
50{
51 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
52
53 return readb(rtwpci->mmap + addr);
54}
55
56static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
57{
58 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
59
60 return readw(rtwpci->mmap + addr);
61}
62
63static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
64{
65 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
66
67 return readl(rtwpci->mmap + addr);
68}
69
70static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
71{
72 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
73
74 writeb(val, rtwpci->mmap + addr);
75}
76
77static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
78{
79 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
80
81 writew(val, rtwpci->mmap + addr);
82}
83
84static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
85{
86 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
87
88 writel(val, rtwpci->mmap + addr);
89}
90
91static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
92{
93 int offset = tx_ring->r.desc_size * idx;
94
95 return tx_ring->r.head + offset;
96}
97
/* Unlink every skb still queued on @tx_ring, undo its streaming DMA
 * mapping and free it.  Used when tearing a ring down or releasing the
 * DMA path (rtw_pci_dma_release).
 */
static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remaining on the TX queue */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
	}
}
116
/* Release a TX ring: free all queued skbs, then return the coherent
 * descriptor memory allocated in rtw_pci_init_tx_ring.
 */
static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself and mark it gone */
	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}
131
/* Unmap and free every RX buffer skb attached to @rx_ring.  The DMA
 * address of each buffer is stashed in skb->cb by rtw_pci_reset_rx_desc.
 */
static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		/* DMA handle was stored in the skb control buffer */
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}
152
/* Release an RX ring: free all buffer skbs, then return the coherent
 * descriptor memory allocated in rtw_pci_init_rx_ring.
 */
static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}
164
165static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
166{
167 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
168 struct rtw_pci_tx_ring *tx_ring;
169 struct rtw_pci_rx_ring *rx_ring;
170 int i;
171
172 for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
173 tx_ring = &rtwpci->tx_rings[i];
174 rtw_pci_free_tx_ring(rtwdev, tx_ring);
175 }
176
177 for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
178 rx_ring = &rtwpci->rx_rings[i];
179 rtw_pci_free_rx_ring(rtwdev, rx_ring);
180 }
181}
182
/* Allocate the coherent descriptor memory for one TX ring and initialize
 * its bookkeeping (head/dma/len/desc_size, read/write pointers, skb queue).
 *
 * Returns 0, -EINVAL if @len cannot be programmed into the hardware index
 * register (> TRX_BD_IDX_MASK), or -ENOMEM on allocation failure.
 */
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
		return -EINVAL;
	}

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}
213
/* Map @skb for device DMA and (re)write RX buffer descriptor @idx so the
 * hardware can fill it.  The DMA handle is saved in skb->cb so it can be
 * recovered later for unmapping/syncing.
 *
 * Returns 0, -EINVAL if @skb is NULL, or -EBUSY if the DMA mapping failed.
 */
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EBUSY;

	/* remember the mapping for rtw_pci_free_rx_ring_skbs/rx_isr */
	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}
239
/* Hand an already-mapped RX buffer back to the device: sync the buffer
 * for device access and rewrite descriptor @idx with its size and DMA
 * address.  Counterpart of the for-cpu sync done in the RX ISR.
 */
static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}
256
257static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
258 struct rtw_pci_rx_ring *rx_ring,
259 u8 desc_size, u32 len)
260{
261 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
262 struct sk_buff *skb = NULL;
263 dma_addr_t dma;
264 u8 *head;
265 int ring_sz = desc_size * len;
266 int buf_sz = RTK_PCI_RX_BUF_SIZE;
267 int i, allocated;
268 int ret = 0;
269
270 if (len > TRX_BD_IDX_MASK) {
271 rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
272 return -EINVAL;
273 }
274
275 head = pci_zalloc_consistent(pdev, ring_sz, &dma);
276 if (!head) {
277 rtw_err(rtwdev, "failed to allocate rx ring\n");
278 return -ENOMEM;
279 }
280 rx_ring->r.head = head;
281
282 for (i = 0; i < len; i++) {
283 skb = dev_alloc_skb(buf_sz);
284 if (!skb) {
285 allocated = i;
286 ret = -ENOMEM;
287 goto err_out;
288 }
289
290 memset(skb->data, 0, buf_sz);
291 rx_ring->buf[i] = skb;
292 ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
293 if (ret) {
294 allocated = i;
295 dev_kfree_skb_any(skb);
296 goto err_out;
297 }
298 }
299
300 rx_ring->r.dma = dma;
301 rx_ring->r.len = len;
302 rx_ring->r.desc_size = desc_size;
303 rx_ring->r.wp = 0;
304 rx_ring->r.rp = 0;
305
306 return 0;
307
308err_out:
309 for (i = 0; i < allocated; i++) {
310 skb = rx_ring->buf[i];
311 if (!skb)
312 continue;
313 dma = *((dma_addr_t *)skb->cb);
314 pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
315 dev_kfree_skb_any(skb);
316 rx_ring->buf[i] = NULL;
317 }
318 pci_free_consistent(pdev, ring_sz, head, dma);
319
320 rtw_err(rtwdev, "failed to init rx buffer\n");
321
322 return ret;
323}
324
/* Set up every TX and RX ring for the device.  On any failure, all rings
 * initialized so far (i TX rings and j RX rings) are freed before the
 * error is returned.
 */
static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	/* i holds the count of fully-initialized TX rings at failure time */
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	/* likewise j for RX rings (0 if the TX loop failed) */
	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}
373
/* Inverse of rtw_pci_init(): release all TX/RX ring resources. */
static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}
378
/* One-time PCI-layer initialization: program the software copies of the
 * interrupt masks (the corresponding TX/RX "done" and error interrupts
 * handled in rtw_pci_interrupt_threadfn), initialize the two spinlocks,
 * and allocate all TX/RX rings.
 */
static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	/* irq_lock serializes TX/PS state; hwirq_lock guards HIMR access */
	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->hwirq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}
403
/* Program the hardware with the DMA base address and length of every
 * TX/RX ring, and reset all software read/write pointers to 0.  The
 * register write sequence follows the vendor flow; do not reorder.
 */
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	/* NOTE(review): OR-ing 0xf7 into RTK_PCI_CTRL+3 — presumably this
	 * disables/holds parts of the DMA engine while bases are rewritten;
	 * confirm against the vendor reference before touching.
	 */
	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	/* beacon queue has a base register but no index register */
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	/* 11N (wcpu) parts have no H2C ring */
	if (!rtw_chip_wcpu_11n(rtwdev)) {
		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
	}

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset all hardware read/write pointers in one shot */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* 11AC parts clear the H2C queue indexes through a separate CSR */
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
483
/* Reset all ring state in both software and hardware. */
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}
488
/* Write the cached interrupt masks to the HIMR registers and mark
 * interrupts enabled.  hwirq_lock keeps this atomic with respect to
 * rtw_pci_disable_interrupt()/rtw_pci_irq_recognized().
 */
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
505
/* Mask all HIMR interrupts.  A no-op when already disabled, so the hard
 * IRQ handler and other callers can race safely under hwirq_lock.
 */
static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	if (!rtwpci->irq_enabled)
		goto out;

	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
526
/* Reset the TX/RX DMA engine and enable RX tagging, then restart the
 * software RX tag counter used by rtw_pci_dma_check().
 */
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}
534
/* Bring the PCI data path to a clean state: reprogram ring bases/indexes
 * and reset the DMA engine.  Always succeeds.
 */
static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}
544
/* Quiesce the data path: reset the rings, then drop every skb still
 * pending on any TX queue (the rings themselves stay allocated).
 */
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	u8 queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}
556
/* Start the PCI transport: unmask device interrupts.  Taken under
 * irq_lock to serialize with the threaded IRQ handler.
 */
static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}
567
/* Stop the PCI transport: mask interrupts and flush all pending TX under
 * irq_lock so no ISR can run concurrently with the teardown.
 */
static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
}
577
/* Try to enter deep (LPS) power save.  Refuses while any data/mgmt TX
 * queue still has pending skbs, since the hardware must stay awake until
 * they are DMAed out.  Caller must hold irq_lock.
 */
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	bool tx_empty = true;
	u8 queue;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* scan all TX queues for pending frames */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* beacon and H2C queues are not blockers: they are driven
		 * by the driver/firmware, not by pending data traffic
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* anything still queued keeps us out of deep PS */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}
614
/* Leave deep power save if it was entered; test_and_clear_bit makes the
 * wake-up idempotent.  Caller must hold irq_lock.
 */
static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}
624
/* Public deep-PS toggle: enter or leave deep power save, skipping the
 * transition when the requested state already holds.
 */
static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
}
639
/* mac80211 access category -> driver TX hardware queue */
static u8 ac_to_hwq[] = {
	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};
646
/* Pick the driver TX queue for an outgoing frame: beacons and
 * mgmt/control frames go to their dedicated queues; data frames map
 * through ac_to_hwq, falling back to BE (with a one-shot warning) if
 * mac80211 ever hands us an out-of-range queue mapping.
 */
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 q_mapping = skb_get_queue_mapping(skb);
	u8 queue;

	if (unlikely(ieee80211_is_beacon(fc)))
		queue = RTW_TX_QUEUE_BCN;
	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		queue = RTW_TX_QUEUE_MGMT;
	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
		queue = ac_to_hwq[IEEE80211_AC_BE];
	else
		queue = ac_to_hwq[q_mapping];

	return queue;
}
665
666static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
667 struct rtw_pci_tx_ring *ring)
668{
669 struct sk_buff *prev = skb_dequeue(&ring->queue);
670 struct rtw_pci_tx_data *tx_data;
671 dma_addr_t dma;
672
673 if (!prev)
674 return;
675
676 tx_data = rtw_pci_get_tx_data(prev);
677 dma = tx_data->dma;
678 pci_unmap_single(rtwpci->pdev, dma, prev->len,
679 PCI_DMA_TODEVICE);
680 dev_kfree_skb_any(prev);
681}
682
/* Verify the RX tag the hardware wrote into descriptor @idx against the
 * driver's expected running tag; a mismatch indicates the PCI bus stalled
 * and the DMA data may be stale.  The expected tag always advances.
 */
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* the device echoes the RX tag in the total_pkt_size field */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}
703
/* Kick the DMA engine for @queue by publishing the ring's write pointer
 * to its index register.  Leaves deep PS first, since the hardware must
 * be awake to see the doorbell.
 */
static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];
	bd_idx = rtw_pci_tx_queue_idx_addr[queue];

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
}
718
/* Kick every queue that rtw_pci_tx_write_data() marked as having newly
 * queued frames (tx_queued bitmap), clearing each mark as it goes.
 */
static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u8 queue;

	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, queue);
}
728
/* Queue one skb on TX queue @queue: prepend and fill the packet
 * descriptor, DMA-map the whole frame, write the two-segment buffer
 * descriptor (segment 0 = packet descriptor, segment 1 = payload), and
 * advance the ring write pointer.  Does not ring the doorbell; see
 * rtw_pci_tx_kick_off*().
 *
 * Returns 0, -ENOSPC when the ring has no free descriptor, or -EBUSY on
 * DMA mapping failure.  On failure the skb is NOT freed (and on -EBUSY
 * the packet descriptor has already been pushed onto it) — the caller
 * owns it.
 */
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info,
				 struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	/* the beacon queue holds exactly one frame: recycle the old one */
	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtwpci->pdev, dma))
		return -EBUSY;

	/* fill the two-segment buffer descriptor for this frame */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	/* psb_len is the frame length in 128-byte units, rounded up */
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	/* the beacon queue has no index register to advance */
	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* remember to kick off this queue, and advance the write pointer */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}
797
798static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
799 u32 size)
800{
801 struct sk_buff *skb;
802 struct rtw_tx_pkt_info pkt_info = {0};
803 u8 reg_bcn_work;
804 int ret;
805
806 skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
807 if (!skb)
808 return -ENOMEM;
809
810 ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
811 if (ret) {
812 rtw_err(rtwdev, "failed to write rsvd page data\n");
813 return ret;
814 }
815
816
817 reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
818 reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
819 rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
820
821 return 0;
822}
823
824static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
825{
826 struct sk_buff *skb;
827 struct rtw_tx_pkt_info pkt_info = {0};
828 int ret;
829
830 skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
831 if (!skb)
832 return -ENOMEM;
833
834 ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
835 if (ret) {
836 rtw_err(rtwdev, "failed to write h2c data\n");
837 return ret;
838 }
839
840 rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
841
842 return 0;
843}
844
/* mac80211 TX entry point for the PCI transport: queue the skb on the
 * queue chosen by rtw_hw_queue_mapping(), and stop the mac80211 queue
 * when fewer than two descriptors remain so the next frame cannot
 * overflow the ring.
 */
static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}

	return 0;
}
866
/* TX-done handler for one hardware queue: read the hardware read pointer,
 * reclaim every completed skb (unmap, report status to mac80211, wake a
 * stopped queue once enough descriptors free up), and sync the software
 * read pointer.
 */
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	/* hardware read pointer lives in the upper half of the register */
	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= TRX_BD_IDX_MASK;
	/* number of completed descriptors, accounting for wrap-around */
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
				 PCI_DMA_TODEVICE);

		/* H2C commands carry no mac80211 status to report */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		/* strip the packet descriptor pushed at TX time */
		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* explicit status requests go through the TX report path */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* otherwise synthesize a success status */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}
937
/* RX handler for the MPDU queue: for each descriptor the hardware has
 * filled, copy the frame out into a freshly allocated skb (the original
 * ring buffer is immediately recycled back to the device), then hand the
 * copy to the C2H handler or to mac80211.
 */
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_ring *ring;
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_wp, cur_rp, tmp;
	u32 count;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

	/* hardware write pointer is in the upper half of the register */
	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = tmp >> 16;
	cur_wp &= TRX_BD_IDX_MASK;
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	cur_rp = ring->r.rp;
	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* header area preceding the payload in the RX buffer */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* copy descriptor + frame into a private skb so the ring
		 * buffer can be returned to the device right away.
		 * NOTE(review): new_len is taken from the descriptor and is
		 * not clamped to RTK_PCI_RX_BUF_SIZE before skb_put_data —
		 * verify the hardware can never report a larger length.
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* strip the RX descriptor before handing to mac80211 */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(rtwdev->hw, new);
		}

next_rp:
		/* give the ring buffer back to the device and advance */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	ring->r.wp = cur_wp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}
1015
/* Read, mask and acknowledge the pending interrupt status registers into
 * @irq_status[0..3] (HISR2 is unused and left alone).  Writing the status
 * value back to HISRx acks exactly the bits we consumed.
 */
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	if (rtw_chip_wcpu_11ac(rtwdev))
		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	else
		irq_status[3] = 0;
	/* only act on interrupts we actually enabled */
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
1039
/* Hard IRQ handler: only mask further device interrupts and defer all
 * real work to the threaded handler.  Masking here prevents an interrupt
 * storm between now and when the thread runs (and re-enables them).
 */
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_disable_interrupt(rtwdev, rtwpci);

	return IRQ_WAKE_THREAD;
}
1057
/* Threaded IRQ handler: latch and ack the pending status bits, dispatch
 * each TX-done/RX interrupt to its queue handler, then re-enable device
 * interrupts (the hard handler masked them).  Runs under irq_lock so TX
 * completion cannot race with the TX submit path.
 */
static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK)
		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

	/* all of the jobs for this interrupt have been done */
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}
1090
/* Claim the device's PCI regions and iomap BAR 2, where the chip exposes
 * its register space.  Returns 0, the pci_request_regions() error, or
 * -ENOMEM if the iomap fails (regions are released again in that case).
 */
static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}
1115
1116static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
1117 struct pci_dev *pdev)
1118{
1119 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1120
1121 if (rtwpci->mmap) {
1122 pci_iounmap(pdev, rtwpci->mmap);
1123 pci_release_regions(pdev);
1124 }
1125}
1126
/* Write one byte to a DBI (PCIe configuration shadow) register: stage the
 * data byte, program the dword address with the byte-lane write-enable
 * bit, trigger the write flag, then poll until the hardware clears it.
 */
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	/* byte lane within the dword — presumably addr & 3, depending on
	 * how BITS_DBI_WREN/BITS_DBI_ADDR_MASK are defined; TODO confirm
	 */
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	/* poll for write completion; the flag clears when done */
	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}
1150
/* Read one byte from a PCIe DBI (config-space side) register.
 *
 * Programs the dword-aligned address, raises the read flag, and polls
 * until the hardware clears it; the requested byte is then picked out of
 * the 4-byte read-data window at REG_DBI_RDATA_V1.
 *
 * Returns 0 on success with the byte stored in *value, or -EIO if the
 * hardware did not complete the read within RTW_PCI_WR_RETRY_CNT polls.
 */
static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	/* kick off the transaction; HW clears this flag when data is ready */
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			/* low two address bits select the byte lane */
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}
1174
/* Write @data to a PCIe PHY register through the MDIO interface.
 *
 * The PHY register space is paged: @addr indexes a register within a
 * page of RTW_PCI_MDIO_PG_SZ entries, and the page number is offset by
 * RTW_PCI_MDIO_PG_OFFS_G1 or _G2 depending on whether the Gen1
 * (@g1 == true) or Gen2 parameter bank is targeted.  Setting
 * BIT_MDIO_WFLAG_V1 starts the transaction; the hardware clears it on
 * completion, so we poll up to RTW_PCI_WR_RETRY_CNT times, 10us apart.
 */
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	/* stage the data word first, then program address and page */
	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	/* trigger the write; HW clears WFLAG once it has latched the data */
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	/* wflag is necessarily non-zero here: the PHY never acknowledged */
	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}
1200
1201static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
1202{
1203 u8 value;
1204 int ret;
1205
1206 if (rtw_pci_disable_aspm)
1207 return;
1208
1209 ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1210 if (ret) {
1211 rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1212 return;
1213 }
1214
1215 if (enable)
1216 value |= BIT_CLKREQ_SW_EN;
1217 else
1218 value &= ~BIT_CLKREQ_SW_EN;
1219
1220 rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1221}
1222
1223static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
1224{
1225 u8 value;
1226 int ret;
1227
1228 if (rtw_pci_disable_aspm)
1229 return;
1230
1231 ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1232 if (ret) {
1233 rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
1234 return;
1235 }
1236
1237 if (enable)
1238 value |= BIT_L1_SW_EN;
1239 else
1240 value &= ~BIT_L1_SW_EN;
1241
1242 rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1243}
1244
/* Enter/leave PCIe link power-save around driver power-save transitions.
 *
 * Only the ASPM L1 mechanism is toggled, and only when the host
 * advertised L1 support in its link control register (cached in
 * rtwpci->link_ctrl by rtw_pci_link_cfg()).
 *
 * NOTE(review): toggling L1 dynamically (rather than leaving it on)
 * appears intentional — presumably to avoid interoperability issues with
 * hosts that enter L1 during active traffic; confirm before changing.
 */
static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
		rtw_pci_aspm_set(rtwdev, enter);
}
1262
/* Sync the device's CLKREQ/ASPM power-save configuration with the host.
 *
 * NOTE(review): the device implements CLKREQ/ASPM in its own HW module,
 * separate from the standard PCIe config space; this code only enables
 * the device-side mechanism when the host's link control register shows
 * the corresponding feature is already on — presumably enabling it
 * against an unsupporting host could destabilize the link.  Confirm
 * against the vendor reference before changing.
 */
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* 8822C only: zero the CLKREQ delay control register */
	if (chip->id == RTW_CHIP_TYPE_8822C)
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	/* mirror the host's CLKREQ setting into the device-side module */
	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	/* cached for rtw_pci_link_ps() to test ASPM L1 support later */
	rtwpci->link_ctrl = link_ctrl;
}
1304
1305static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1306{
1307 struct rtw_chip_info *chip = rtwdev->chip;
1308
1309 switch (chip->id) {
1310 case RTW_CHIP_TYPE_8822C:
1311 if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1312 rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1313 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1314 break;
1315 default:
1316 break;
1317 }
1318}
1319
1320static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1321{
1322 struct rtw_chip_info *chip = rtwdev->chip;
1323 const struct rtw_intf_phy_para *para;
1324 u16 cut;
1325 u16 value;
1326 u16 offset;
1327 int i;
1328
1329 cut = BIT(0) << rtwdev->hal.cut_version;
1330
1331 for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1332 para = &chip->intf_table->gen1_para[i];
1333 if (!(para->cut_mask & cut))
1334 continue;
1335 if (para->offset == 0xffff)
1336 break;
1337 offset = para->offset;
1338 value = para->value;
1339 if (para->ip_sel == RTW_IP_SEL_PHY)
1340 rtw_mdio_write(rtwdev, offset, value, true);
1341 else
1342 rtw_dbi_write8(rtwdev, offset, value);
1343 }
1344
1345 for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1346 para = &chip->intf_table->gen2_para[i];
1347 if (!(para->cut_mask & cut))
1348 continue;
1349 if (para->offset == 0xffff)
1350 break;
1351 offset = para->offset;
1352 value = para->value;
1353 if (para->ip_sel == RTW_IP_SEL_PHY)
1354 rtw_mdio_write(rtwdev, offset, value, false);
1355 else
1356 rtw_dbi_write8(rtwdev, offset, value);
1357 }
1358
1359 rtw_pci_link_cfg(rtwdev);
1360}
1361
/* System suspend hook — currently a no-op. */
static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
	return 0;
}
1366
/* System resume hook — currently a no-op. */
static int __maybe_unused rtw_pci_resume(struct device *dev)
{
	return 0;
}
1371
/* No-op PM ops, exported so chip-specific PCI driver modules can point
 * their struct pci_driver .driver.pm here — TODO(review): confirm users.
 */
SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
EXPORT_SYMBOL(rtw_pm_ops);
1374
1375static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1376{
1377 int ret;
1378
1379 ret = pci_enable_device(pdev);
1380 if (ret) {
1381 rtw_err(rtwdev, "failed to enable pci device\n");
1382 return ret;
1383 }
1384
1385 pci_set_master(pdev);
1386 pci_set_drvdata(pdev, rtwdev->hw);
1387 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1388
1389 return 0;
1390}
1391
/* Reverse of rtw_pci_claim(): stop bus mastering, disable the device. */
static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}
1397
1398static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1399{
1400 struct rtw_pci *rtwpci;
1401 int ret;
1402
1403 rtwpci = (struct rtw_pci *)rtwdev->priv;
1404 rtwpci->pdev = pdev;
1405
1406
1407 ret = rtw_pci_io_mapping(rtwdev, pdev);
1408 if (ret) {
1409 rtw_err(rtwdev, "failed to request pci io region\n");
1410 goto err_out;
1411 }
1412
1413 ret = rtw_pci_init(rtwdev);
1414 if (ret) {
1415 rtw_err(rtwdev, "failed to allocate pci resources\n");
1416 goto err_io_unmap;
1417 }
1418
1419 return 0;
1420
1421err_io_unmap:
1422 rtw_pci_io_unmapping(rtwdev, pdev);
1423
1424err_out:
1425 return ret;
1426}
1427
/* Tear down what rtw_pci_setup_resource() built: the DMA rings/buffers
 * first, then the MMIO mapping and PCI regions.
 */
static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}
1433
/* PCIe implementation of the HCI backend interface; installed into
 * rtwdev->hci.ops by rtw_pci_probe().
 */
static struct rtw_hci_ops rtw_pci_ops = {
	/* data path and lifecycle */
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,

	/* MMIO register accessors and firmware data paths */
	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};
1453
1454static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1455{
1456 unsigned int flags = PCI_IRQ_LEGACY;
1457 int ret;
1458
1459 if (!rtw_disable_msi)
1460 flags |= PCI_IRQ_MSI;
1461
1462 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1463 if (ret < 0) {
1464 rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1465 return ret;
1466 }
1467
1468 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1469 rtw_pci_interrupt_handler,
1470 rtw_pci_interrupt_threadfn,
1471 IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1472 if (ret) {
1473 rtw_err(rtwdev, "failed to request irq %d\n", ret);
1474 pci_free_irq_vectors(pdev);
1475 }
1476
1477 return ret;
1478}
1479
/* Release the irq handler and vector from rtw_pci_request_irq().  devm
 * would release the handler on driver detach anyway, but freeing it
 * explicitly keeps the teardown ordering in rtw_pci_remove() exact.
 */
static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}
1485
/* Probe one rtw88 PCIe device.
 *
 * Allocates the ieee80211_hw with room for both rtw_dev and rtw_pci as
 * private data, initializes the core, claims and maps the PCI
 * resources, reads chip information, configures the PCIe PHY, then
 * registers with mac80211 and hooks up the interrupt.  Each failure
 * unwinds exactly the steps completed so far via the goto chain.
 */
int rtw_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	/* rtw_pci lives directly after rtw_dev inside hw->priv */
	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	/* chip info pointer was stashed in the PCI id table's driver_data */
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		/* hw is registered by now; unregister before the chain */
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_pci_probe);
1565
/* PCI removal: unwind everything rtw_pci_probe() set up.  Order
 * matters: unregister from mac80211 first (quiesces the stack), mask
 * device interrupts, then free DMA/IO resources and the device before
 * dropping the irq and the core state.
 */
void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	/* drvdata is only set once rtw_pci_claim() succeeded */
	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_pci_remove);
1587
1588void rtw_pci_shutdown(struct pci_dev *pdev)
1589{
1590 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1591 struct rtw_dev *rtwdev;
1592 struct rtw_chip_info *chip;
1593
1594 if (!hw)
1595 return;
1596
1597 rtwdev = hw->priv;
1598 chip = rtwdev->chip;
1599
1600 if (chip->ops->shutdown)
1601 chip->ops->shutdown(rtwdev);
1602}
1603EXPORT_SYMBOL(rtw_pci_shutdown);
1604
/* Module metadata for the shared rtw88 PCI transport module. */
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");
1608