// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt7921.h"
#include "../dma.h"
#include "mac.h"

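/*
 * All data queues (MT_TXQ_VO..MT_TXQ_PSD) share a single hardware TX
 * ring on this chip: only ring 0 is allocated, and the per-queue
 * pointers are aliased to it below. Per-AC scheduling is presumably
 * handled by the firmware/DMASHDL rather than by separate host rings.
 */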
int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
{
	int i, err;

	err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
	if (err < 0)
		return err;

	for (i = 0; i <= MT_TXQ_PSD; i++)
		phy->mt76->q_tx[i] = phy->mt76->q_tx[0];

	return 0;
}

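/*
 * Dispatch a received buffer based on the packet type encoded in the
 * first RXD word. Events tagged with flag 0x1 carry a frame that must
 * take the normal RX path, so they are retyped as PKT_TYPE_NORMAL_MCU
 * (the exact flag semantics are an inference from this check, not from
 * documentation).
 */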
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	enum rx_pkt_type type;
	u16 flag;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
	flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));

	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7921_mac_tx_free(dev, skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7921_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7921_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7921_tx_cleanup(struct mt7921_dev *dev)
{
	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
}

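/*
 * NAPI pollers. If the chip is runtime-suspended, taking a PM reference
 * fails: in that case complete NAPI immediately and kick the wake
 * worker, which resumes the device and reschedules the deferred work.
 */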
static int mt7921_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7921_dev *dev;

	dev = container_of(napi, struct mt7921_dev, mt76.tx_napi);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	mt7921_tx_cleanup(dev);
	if (napi_complete(napi))
		mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return 0;
}

static int mt7921_poll_rx(struct napi_struct *napi, int budget)
{
	struct mt7921_dev *dev;
	int done;

	dev = container_of(napi->dev, struct mt7921_dev, mt76.napi_dev);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}
	done = mt76_dma_rx_poll(napi, budget);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return done;
}

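/*
 * Program per-ring prefetch windows. Each EXT_CTRL register packs a
 * base offset in bits [31:16] and a prefetch depth in the low bits,
 * e.g. PREFETCH(0x40, 0x4) expands to 0x00400004. The bases are spaced
 * 0x40 apart so consecutive rings get non-overlapping windows (an
 * inference from the values below, not a documented requirement).
 */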
static void mt7921_dma_prefetch(struct mt7921_dev *dev)
{
#define PREFETCH(base, depth)	((base) << 16 | (depth))

	mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}

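/*
 * Translate a chip bus address into an offset within the host register
 * window. Addresses below 0x100000 are already window offsets and pass
 * through unchanged; known on-chip blocks are translated via the static
 * table; the remaining remappable ranges go through the dynamic L1
 * remap. Illustrative example (values taken from the table below):
 *
 *	__mt7921_reg_addr(dev, 0x820e0000) == 0x20000
 */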
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
	static const struct {
		u32 phys;
		u32 mapped;
		u32 size;
	} fixed_map[] = {
		{ 0x00400000, 0x80000, 0x10000 },
		{ 0x00410000, 0x90000, 0x10000 },
		{ 0x40000000, 0x70000, 0x10000 },
		{ 0x54000000, 0x02000, 0x1000 },
		{ 0x55000000, 0x03000, 0x1000 },
		{ 0x58000000, 0x06000, 0x1000 },
		{ 0x59000000, 0x07000, 0x1000 },
		{ 0x7c000000, 0xf0000, 0x10000 },
		{ 0x7c020000, 0xd0000, 0x10000 },
		{ 0x7c060000, 0xe0000, 0x10000 },
		{ 0x80020000, 0xb0000, 0x10000 },
		{ 0x81020000, 0xc0000, 0x10000 },
		{ 0x820c0000, 0x08000, 0x4000 },
		{ 0x820c8000, 0x0c000, 0x2000 },
		{ 0x820cc000, 0x0e000, 0x2000 },
		{ 0x820ce000, 0x21c00, 0x0200 },
		{ 0x820cf000, 0x22000, 0x1000 },
		{ 0x820d0000, 0x30000, 0x10000 },
		{ 0x820e0000, 0x20000, 0x0400 },
		{ 0x820e1000, 0x20400, 0x0200 },
		{ 0x820e2000, 0x20800, 0x0400 },
		{ 0x820e3000, 0x20c00, 0x0400 },
		{ 0x820e4000, 0x21000, 0x0400 },
		{ 0x820e5000, 0x21400, 0x0800 },
		{ 0x820e7000, 0x21e00, 0x0200 },
		{ 0x820e9000, 0x23400, 0x0200 },
		{ 0x820ea000, 0x24000, 0x0200 },
		{ 0x820eb000, 0x24200, 0x0400 },
		{ 0x820ec000, 0x24600, 0x0200 },
		{ 0x820ed000, 0x24800, 0x0800 },
		{ 0x820f0000, 0xa0000, 0x0400 },
		{ 0x820f1000, 0xa0600, 0x0200 },
		{ 0x820f2000, 0xa0800, 0x0400 },
		{ 0x820f3000, 0xa0c00, 0x0400 },
		{ 0x820f4000, 0xa1000, 0x0400 },
		{ 0x820f5000, 0xa1400, 0x0800 },
		{ 0x820f7000, 0xa1e00, 0x0200 },
		{ 0x820f9000, 0xa3400, 0x0200 },
		{ 0x820fa000, 0xa4000, 0x0200 },
		{ 0x820fb000, 0xa4200, 0x0400 },
		{ 0x820fc000, 0xa4600, 0x0200 },
		{ 0x820fd000, 0xa4800, 0x0800 },
	};
	int i;

	if (addr < 0x100000)
		return addr;

	for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
		u32 ofs;

		if (addr < fixed_map[i].phys)
			continue;

		ofs = addr - fixed_map[i].phys;
		if (ofs > fixed_map[i].size)
			continue;

		return fixed_map[i].mapped + ofs;
	}

	if ((addr >= 0x18000000 && addr < 0x18c00000) ||
	    (addr >= 0x70000000 && addr < 0x78000000) ||
	    (addr >= 0x7c000000 && addr < 0x7c400000))
		return mt7921_reg_map_l1(dev, addr);

	dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
		addr);

	return 0;
}

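/*
 * Bus accessor wrappers. mt7921_dma_init() saves the original bus ops
 * in dev->bus_ops and installs these instead, so every mt76_rr/wr/rmw
 * call is transparently routed through __mt7921_reg_addr() first.
 */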
static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	u32 addr = __mt7921_reg_addr(dev, offset);

	return dev->bus_ops->rr(mdev, addr);
}

static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	u32 addr = __mt7921_reg_addr(dev, offset);

	dev->bus_ops->wr(mdev, addr, val);
}

static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	u32 addr = __mt7921_reg_addr(dev, offset);

	return dev->bus_ops->rmw(mdev, addr, mask, val);
}

static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
	if (force) {
		/* reset */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	/* disable dmashdl */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG,
		       MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
		       MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
		return -ETIMEDOUT;

	return 0;
}

static int mt7921_dma_enable(struct mt7921_dev *dev)
{
	/* configure prefetch settings */
	mt7921_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);

	/* configure delay interrupt */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt7921_irq_enable(dev,
			  MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			  MT_INT_MCU_CMD);
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}

static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
{
	int i, err;

	err = mt7921_dma_disable(dev, force);
	if (err)
		return err;

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, NULL, true);

	return mt7921_dma_enable(dev);
}

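/*
 * Reset the WiFi subsystem: assert the SW reset line, hold it for
 * 50 ms, deassert, then poll (up to 500 ms) for WFSYS_SW_INIT_DONE to
 * signal that the subsystem has come back up. The 50 ms hold time
 * mirrors the vendor sequence; the hardware minimum is not documented
 * here.
 */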
int mt7921_wfsys_reset(struct mt7921_dev *dev)
{
	mt76_clear(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
	msleep(50);
	mt76_set(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);

	if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
			      WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
		return -ETIMEDOUT;

	return 0;
}

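/*
 * Full WPDMA reset: flush every TX/MCU/RX queue, optionally reset the
 * whole WiFi subsystem first, re-run the DMA disable/enable sequence,
 * and finally re-arm the RX rings.
 */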
int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt7921_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt7921_dma_reset(dev, force);
	if (err)
		return err;

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}

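/*
 * Conditionally reinitialize WPDMA after a low-power wake-up. The
 * marker bit written in mt7921_dma_enable() (MT_WFDMA_NEED_REINIT)
 * presumably reads back cleared once the chip has lost register state
 * in deep sleep; mt7921_dma_need_reinit() checks for that, and the
 * reset is then performed with interrupts masked.
 */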
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
{
	struct mt76_connac_pm *pm = &dev->pm;
	int err;

	/* check if the wpdma must be reinitialized */
	if (mt7921_dma_need_reinit(dev)) {
		/* disable interrupts */
		mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

		err = mt7921_wpdma_reset(dev, false);
		if (err) {
			dev_err(dev->mt76.dev, "wpdma reset failed\n");
			return err;
		}

		/* enable interrupts */
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		pm->stats.lp_wake++;
	}

	return 0;
}

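/*
 * DMA bring-up. The bus ops are wrapped before anything else so that
 * all subsequent register accesses go through the remapping layer, and
 * a full DMA disable plus WFSYS reset precedes queue allocation so the
 * rings start from a clean state. Ring IDs and sizes (MT7921_TXQ_*,
 * MT7921_*_RING_SIZE) come from mt7921.h.
 */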
int mt7921_dma_init(struct mt7921_dev *dev)
{
	struct mt76_bus_ops *bus_ops;
	int ret;

	dev->bus_ops = dev->mt76.bus;
	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
			       GFP_KERNEL);
	if (!bus_ops)
		return -ENOMEM;

	bus_ops->rr = mt7921_rr;
	bus_ops->wr = mt7921_wr;
	bus_ops->rmw = mt7921_rmw;
	dev->mt76.bus = bus_ops;

	mt76_dma_attach(&dev->mt76);

	ret = mt7921_dma_disable(dev, true);
	if (ret)
		return ret;

	ret = mt7921_wfsys_reset(dev);
	if (ret)
		return ret;

	/* init tx queue */
	ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
				    MT7921_TX_RING_SIZE);
	if (ret)
		return ret;

	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4);

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7921_TXQ_MCU_WM,
				  MT7921_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7921_TXQ_FWDL,
				  MT7921_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	/* event from WM before firmware download */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT7921_RXQ_MCU_WM,
			       MT7921_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
	if (ret)
		return ret;

	/* Change mcu queue after firmware download */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT7921_RXQ_MCU_WM,
			       MT7921_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_WFDMA0(0x540));
	if (ret)
		return ret;

	/* rx data */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev, mt7921_poll_rx);
	if (ret < 0)
		return ret;

	netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7921_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return mt7921_dma_enable(dev);
}

void mt7921_dma_cleanup(struct mt7921_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* reset */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	mt76_dma_cleanup(&dev->mt76);
}