#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"
#include "coredump.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"
25
enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define DIAG_TRANSFER_LIMIT 0x5000

#define QCA99X0_PCIE_BAR0_START_REG 0x81030
#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
#define QCA99X0_CPU_MEM_DATA_REG 0x4d010
52
static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) },
	{0}
};
67
68static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
69
70
71
72
73 { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
74 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
75
76 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
77 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
78 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
79 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
80 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
81
82 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
83 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
84 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
85 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
87
88 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
89
90 { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
91
92 { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
93
94 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
95 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
96
97 { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
98};
99
100static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
101static int ath10k_pci_cold_reset(struct ath10k *ar);
102static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
103static int ath10k_pci_init_irq(struct ath10k *ar);
104static int ath10k_pci_deinit_irq(struct ath10k *ar);
105static int ath10k_pci_request_irq(struct ath10k *ar);
106static void ath10k_pci_free_irq(struct ath10k *ar);
107static int ath10k_pci_bmi_wait(struct ath10k *ar,
108 struct ath10k_ce_pipe *tx_pipe,
109 struct ath10k_ce_pipe *rx_pipe,
110 struct bmi_xfer *xfer);
111static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
112static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
113static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
114static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
115static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
116static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
117static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
118
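/* Host-side Copy Engine configuration. Pipe roles follow the service map
 * further below: CE0 host->target HTC control, CE1 target->host HTC
 * control/HTT, CE2 target->host WMI, CE3 host->target WMI, CE4 host->target
 * HTT (interrupts disabled, polled), CE5 target->host HTT, CE7 diagnostic
 * transfers (polled), CE8 target->host pktlog. Entries with zero ring sizes
 * are not used by the host.
 */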
119static struct ce_attr host_ce_config_wlan[] = {
120
121 {
122 .flags = CE_ATTR_FLAGS,
123 .src_nentries = 16,
124 .src_sz_max = 256,
125 .dest_nentries = 0,
126 .send_cb = ath10k_pci_htc_tx_cb,
127 },
128
129
130 {
131 .flags = CE_ATTR_FLAGS,
132 .src_nentries = 0,
133 .src_sz_max = 2048,
134 .dest_nentries = 512,
135 .recv_cb = ath10k_pci_htt_htc_rx_cb,
136 },
137
138
139 {
140 .flags = CE_ATTR_FLAGS,
141 .src_nentries = 0,
142 .src_sz_max = 2048,
143 .dest_nentries = 128,
144 .recv_cb = ath10k_pci_htc_rx_cb,
145 },
146
147
148 {
149 .flags = CE_ATTR_FLAGS,
150 .src_nentries = 32,
151 .src_sz_max = 2048,
152 .dest_nentries = 0,
153 .send_cb = ath10k_pci_htc_tx_cb,
154 },
155
156
157 {
158 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
159 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
160 .src_sz_max = 256,
161 .dest_nentries = 0,
162 .send_cb = ath10k_pci_htt_tx_cb,
163 },
164
165
166 {
167 .flags = CE_ATTR_FLAGS,
168 .src_nentries = 0,
169 .src_sz_max = 512,
170 .dest_nentries = 512,
171 .recv_cb = ath10k_pci_htt_rx_cb,
172 },
173
174
175 {
176 .flags = CE_ATTR_FLAGS,
177 .src_nentries = 0,
178 .src_sz_max = 0,
179 .dest_nentries = 0,
180 },
181
182
183 {
184 .flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
185 .src_nentries = 2,
186 .src_sz_max = DIAG_TRANSFER_LIMIT,
187 .dest_nentries = 2,
188 },
189
190
191 {
192 .flags = CE_ATTR_FLAGS,
193 .src_nentries = 0,
194 .src_sz_max = 2048,
195 .dest_nentries = 128,
196 .recv_cb = ath10k_pci_pktlog_rx_cb,
197 },
198
199
200 {
201 .flags = CE_ATTR_FLAGS,
202 .src_nentries = 0,
203 .src_sz_max = 0,
204 .dest_nentries = 0,
205 },
206
207
208 {
209 .flags = CE_ATTR_FLAGS,
210 .src_nentries = 0,
211 .src_sz_max = 0,
212 .dest_nentries = 0,
213 },
214
215
216 {
217 .flags = CE_ATTR_FLAGS,
218 .src_nentries = 0,
219 .src_sz_max = 0,
220 .dest_nentries = 0,
221 },
222};
223
224
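/* Target firmware's Copy Engine configuration. This table is written into
 * target memory by ath10k_pci_init_config() and has to stay in sync with
 * the host-side table above.
 */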
225static struct ce_pipe_config target_ce_config_wlan[] = {
226
227 {
228 .pipenum = __cpu_to_le32(0),
229 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
230 .nentries = __cpu_to_le32(32),
231 .nbytes_max = __cpu_to_le32(256),
232 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
233 .reserved = __cpu_to_le32(0),
234 },
235
236
237 {
238 .pipenum = __cpu_to_le32(1),
239 .pipedir = __cpu_to_le32(PIPEDIR_IN),
240 .nentries = __cpu_to_le32(32),
241 .nbytes_max = __cpu_to_le32(2048),
242 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
243 .reserved = __cpu_to_le32(0),
244 },
245
246
247 {
248 .pipenum = __cpu_to_le32(2),
249 .pipedir = __cpu_to_le32(PIPEDIR_IN),
250 .nentries = __cpu_to_le32(64),
251 .nbytes_max = __cpu_to_le32(2048),
252 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
253 .reserved = __cpu_to_le32(0),
254 },
255
256
257 {
258 .pipenum = __cpu_to_le32(3),
259 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
260 .nentries = __cpu_to_le32(32),
261 .nbytes_max = __cpu_to_le32(2048),
262 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
263 .reserved = __cpu_to_le32(0),
264 },
265
266
267 {
268 .pipenum = __cpu_to_le32(4),
269 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
270 .nentries = __cpu_to_le32(256),
271 .nbytes_max = __cpu_to_le32(256),
272 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
273 .reserved = __cpu_to_le32(0),
274 },
275
276
277
278
279 {
280 .pipenum = __cpu_to_le32(5),
281 .pipedir = __cpu_to_le32(PIPEDIR_IN),
282 .nentries = __cpu_to_le32(32),
283 .nbytes_max = __cpu_to_le32(512),
284 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
285 .reserved = __cpu_to_le32(0),
286 },
287
288
289 {
290 .pipenum = __cpu_to_le32(6),
291 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
292 .nentries = __cpu_to_le32(32),
293 .nbytes_max = __cpu_to_le32(4096),
294 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
295 .reserved = __cpu_to_le32(0),
296 },
297
298
299 {
300 .pipenum = __cpu_to_le32(7),
301 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
302 .nentries = __cpu_to_le32(0),
303 .nbytes_max = __cpu_to_le32(0),
304 .flags = __cpu_to_le32(0),
305 .reserved = __cpu_to_le32(0),
306 },
307
308
309 {
310 .pipenum = __cpu_to_le32(8),
311 .pipedir = __cpu_to_le32(PIPEDIR_IN),
312 .nentries = __cpu_to_le32(64),
313 .nbytes_max = __cpu_to_le32(2048),
314 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
315 .reserved = __cpu_to_le32(0),
316 },
317
318
319 {
320 .pipenum = __cpu_to_le32(9),
321 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
322 .nentries = __cpu_to_le32(32),
323 .nbytes_max = __cpu_to_le32(2048),
324 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
325 .reserved = __cpu_to_le32(0),
326 },
327
328
329
330
331};
332
333
334
335
336
337
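/* Map from HTC service/endpoint to Copy Engine pipe. This table is also
 * downloaded to the target at startup (see ath10k_pci_init_config()) so
 * host and firmware agree on the routing; the all-zero entry terminates
 * the list.
 */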
338static struct service_to_pipe target_service_to_ce_map_wlan[] = {
339 {
340 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
341 __cpu_to_le32(PIPEDIR_OUT),
342 __cpu_to_le32(3),
343 },
344 {
345 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
346 __cpu_to_le32(PIPEDIR_IN),
347 __cpu_to_le32(2),
348 },
349 {
350 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
351 __cpu_to_le32(PIPEDIR_OUT),
352 __cpu_to_le32(3),
353 },
354 {
355 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
356 __cpu_to_le32(PIPEDIR_IN),
357 __cpu_to_le32(2),
358 },
359 {
360 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
361 __cpu_to_le32(PIPEDIR_OUT),
362 __cpu_to_le32(3),
363 },
364 {
365 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
366 __cpu_to_le32(PIPEDIR_IN),
367 __cpu_to_le32(2),
368 },
369 {
370 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
371 __cpu_to_le32(PIPEDIR_OUT),
372 __cpu_to_le32(3),
373 },
374 {
375 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
376 __cpu_to_le32(PIPEDIR_IN),
377 __cpu_to_le32(2),
378 },
379 {
380 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
381 __cpu_to_le32(PIPEDIR_OUT),
382 __cpu_to_le32(3),
383 },
384 {
385 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
386 __cpu_to_le32(PIPEDIR_IN),
387 __cpu_to_le32(2),
388 },
389 {
390 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
391 __cpu_to_le32(PIPEDIR_OUT),
392 __cpu_to_le32(0),
393 },
394 {
395 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
396 __cpu_to_le32(PIPEDIR_IN),
397 __cpu_to_le32(1),
398 },
399 {
400 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
401 __cpu_to_le32(PIPEDIR_OUT),
402 __cpu_to_le32(0),
403 },
404 {
405 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
406 __cpu_to_le32(PIPEDIR_IN),
407 __cpu_to_le32(1),
408 },
409 {
410 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
411 __cpu_to_le32(PIPEDIR_OUT),
412 __cpu_to_le32(4),
413 },
414 {
415 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
416 __cpu_to_le32(PIPEDIR_IN),
417 __cpu_to_le32(5),
418 },
419
420
421
422 {
423 __cpu_to_le32(0),
424 __cpu_to_le32(0),
425 __cpu_to_le32(0),
426 },
427};
428
429static bool ath10k_pci_is_awake(struct ath10k *ar)
430{
431 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
432 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
433 RTC_STATE_ADDRESS);
434
435 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
436}
437
438static void __ath10k_pci_wake(struct ath10k *ar)
439{
440 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
441
442 lockdep_assert_held(&ar_pci->ps_lock);
443
444 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
445 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
446
447 iowrite32(PCIE_SOC_WAKE_V_MASK,
448 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
449 PCIE_SOC_WAKE_ADDRESS);
450}
451
452static void __ath10k_pci_sleep(struct ath10k *ar)
453{
454 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
455
456 lockdep_assert_held(&ar_pci->ps_lock);
457
458 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
459 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
460
461 iowrite32(PCIE_SOC_WAKE_RESET,
462 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
463 PCIE_SOC_WAKE_ADDRESS);
464 ar_pci->ps_awake = false;
465}
466
467static int ath10k_pci_wake_wait(struct ath10k *ar)
468{
469 int tot_delay = 0;
470 int curr_delay = 5;
471
472 while (tot_delay < PCIE_WAKE_TIMEOUT) {
473 if (ath10k_pci_is_awake(ar)) {
474 if (tot_delay > PCIE_WAKE_LATE_US)
475 ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
476 tot_delay / 1000);
477 return 0;
478 }
479
480 udelay(curr_delay);
481 tot_delay += curr_delay;
482
483 if (curr_delay < 50)
484 curr_delay += 5;
485 }
486
487 return -ETIMEDOUT;
488}
489
490static int ath10k_pci_force_wake(struct ath10k *ar)
491{
492 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
493 unsigned long flags;
494 int ret = 0;
495
496 if (ar_pci->pci_ps)
497 return ret;
498
499 spin_lock_irqsave(&ar_pci->ps_lock, flags);
500
501 if (!ar_pci->ps_awake) {
502 iowrite32(PCIE_SOC_WAKE_V_MASK,
503 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
504 PCIE_SOC_WAKE_ADDRESS);
505
506 ret = ath10k_pci_wake_wait(ar);
507 if (ret == 0)
508 ar_pci->ps_awake = true;
509 }
510
511 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
512
513 return ret;
514}
515
516static void ath10k_pci_force_sleep(struct ath10k *ar)
517{
518 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
519 unsigned long flags;
520
521 spin_lock_irqsave(&ar_pci->ps_lock, flags);
522
523 iowrite32(PCIE_SOC_WAKE_RESET,
524 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
525 PCIE_SOC_WAKE_ADDRESS);
526 ar_pci->ps_awake = false;
527
528 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
529}
530
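/* MMIO power save handling: ath10k_pci_wake() forces SOC_WAKE and takes a
 * reference so the chip stays awake while register accesses are in flight;
 * ath10k_pci_sleep() drops the reference and (re)arms a grace period timer
 * which lets the chip go back to sleep once the refcount reaches zero.
 */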
531static int ath10k_pci_wake(struct ath10k *ar)
532{
533 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
534 unsigned long flags;
535 int ret = 0;
536
537 if (ar_pci->pci_ps == 0)
538 return ret;
539
540 spin_lock_irqsave(&ar_pci->ps_lock, flags);
541
542 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
543 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
544
545
546
547
548 if (!ar_pci->ps_awake) {
549 __ath10k_pci_wake(ar);
550
551 ret = ath10k_pci_wake_wait(ar);
552 if (ret == 0)
553 ar_pci->ps_awake = true;
554 }
555
556 if (ret == 0) {
557 ar_pci->ps_wake_refcount++;
558 WARN_ON(ar_pci->ps_wake_refcount == 0);
559 }
560
561 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
562
563 return ret;
564}
565
566static void ath10k_pci_sleep(struct ath10k *ar)
567{
568 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
569 unsigned long flags;
570
571 if (ar_pci->pci_ps == 0)
572 return;
573
574 spin_lock_irqsave(&ar_pci->ps_lock, flags);
575
576 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
577 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
578
579 if (WARN_ON(ar_pci->ps_wake_refcount == 0))
580 goto skip;
581
582 ar_pci->ps_wake_refcount--;
583
584 mod_timer(&ar_pci->ps_timer, jiffies +
585 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
586
587skip:
588 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
589}
590
591static void ath10k_pci_ps_timer(struct timer_list *t)
592{
593 struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
594 struct ath10k *ar = ar_pci->ar;
595 unsigned long flags;
596
597 spin_lock_irqsave(&ar_pci->ps_lock, flags);
598
599 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
600 ar_pci->ps_wake_refcount, ar_pci->ps_awake);
601
602 if (ar_pci->ps_wake_refcount > 0)
603 goto skip;
604
605 __ath10k_pci_sleep(ar);
606
607skip:
608 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
609}
610
611static void ath10k_pci_sleep_sync(struct ath10k *ar)
612{
613 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
614 unsigned long flags;
615
616 if (ar_pci->pci_ps == 0) {
617 ath10k_pci_force_sleep(ar);
618 return;
619 }
620
621 del_timer_sync(&ar_pci->ps_timer);
622
623 spin_lock_irqsave(&ar_pci->ps_lock, flags);
624 WARN_ON(ar_pci->ps_wake_refcount > 0);
625 __ath10k_pci_sleep(ar);
626 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
627}
628
629static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
630{
631 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
632 int ret;
633
634 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
635 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
636 offset, offset + sizeof(value), ar_pci->mem_len);
637 return;
638 }
639
640 ret = ath10k_pci_wake(ar);
641 if (ret) {
642 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
643 value, offset, ret);
644 return;
645 }
646
647 iowrite32(value, ar_pci->mem + offset);
648 ath10k_pci_sleep(ar);
649}
650
651static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
652{
653 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
654 u32 val;
655 int ret;
656
657 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
658 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
659 offset, offset + sizeof(val), ar_pci->mem_len);
660 return 0;
661 }
662
663 ret = ath10k_pci_wake(ar);
664 if (ret) {
665 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
666 offset, ret);
667 return 0xffffffff;
668 }
669
670 val = ioread32(ar_pci->mem + offset);
671 ath10k_pci_sleep(ar);
672
673 return val;
674}
675
676inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
677{
678 struct ath10k_ce *ce = ath10k_ce_priv(ar);
679
680 ce->bus_ops->write32(ar, offset, value);
681}
682
683inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
684{
685 struct ath10k_ce *ce = ath10k_ce_priv(ar);
686
687 return ce->bus_ops->read32(ar, offset);
688}
689
690u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
691{
692 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
693}
694
695void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
696{
697 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
698}
699
700u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
701{
702 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
703}
704
705void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
706{
707 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
708}
709
710bool ath10k_pci_irq_pending(struct ath10k *ar)
711{
712 u32 cause;
713
714
715 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
716 PCIE_INTR_CAUSE_ADDRESS);
717 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
718 return true;
719
720 return false;
721}
722
723void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
724{
725
726
727
728
729 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
730 0);
731 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
732 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
733
734
735
736
737 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
738 PCIE_INTR_ENABLE_ADDRESS);
739}
740
741void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
742{
743 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
744 PCIE_INTR_ENABLE_ADDRESS,
745 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
746
747
748
749
750 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
751 PCIE_INTR_ENABLE_ADDRESS);
752}
753
754static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
755{
756 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
757
758 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
759 return "msi";
760
761 return "legacy";
762}
763
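/* Allocate and DMA-map a receive skb and post it to the pipe's CE
 * destination ring so the target can DMA an incoming message into it.
 */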
764static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
765{
766 struct ath10k *ar = pipe->hif_ce_state;
767 struct ath10k_ce *ce = ath10k_ce_priv(ar);
768 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
769 struct sk_buff *skb;
770 dma_addr_t paddr;
771 int ret;
772
773 skb = dev_alloc_skb(pipe->buf_sz);
774 if (!skb)
775 return -ENOMEM;
776
777 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
778
779 paddr = dma_map_single(ar->dev, skb->data,
780 skb->len + skb_tailroom(skb),
781 DMA_FROM_DEVICE);
782 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
783 ath10k_warn(ar, "failed to dma map pci rx buf\n");
784 dev_kfree_skb_any(skb);
785 return -EIO;
786 }
787
788 ATH10K_SKB_RXCB(skb)->paddr = paddr;
789
790 spin_lock_bh(&ce->ce_lock);
791 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
792 spin_unlock_bh(&ce->ce_lock);
793 if (ret) {
794 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
795 DMA_FROM_DEVICE);
796 dev_kfree_skb_any(skb);
797 return ret;
798 }
799
800 return 0;
801}
802
803static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
804{
805 struct ath10k *ar = pipe->hif_ce_state;
806 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
807 struct ath10k_ce *ce = ath10k_ce_priv(ar);
808 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
809 int ret, num;
810
811 if (pipe->buf_sz == 0)
812 return;
813
814 if (!ce_pipe->dest_ring)
815 return;
816
817 spin_lock_bh(&ce->ce_lock);
818 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
819 spin_unlock_bh(&ce->ce_lock);
820
821 while (num >= 0) {
822 ret = __ath10k_pci_rx_post_buf(pipe);
823 if (ret) {
824 if (ret == -ENOSPC)
825 break;
826 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
827 mod_timer(&ar_pci->rx_post_retry, jiffies +
828 ATH10K_PCI_RX_POST_RETRY_MS);
829 break;
830 }
831 num--;
832 }
833}
834
835void ath10k_pci_rx_post(struct ath10k *ar)
836{
837 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
838 int i;
839
840 for (i = 0; i < CE_COUNT; i++)
841 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
842}
843
844void ath10k_pci_rx_replenish_retry(struct timer_list *t)
845{
846 struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
847 struct ath10k *ar = ar_pci->ar;
848
849 ath10k_pci_rx_post(ar);
850}
851
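/* Translate a target CPU address into an address the host can use through
 * the CE/diag window: the low 20 bits select the offset within a 1 MB
 * region, the upper bits come from CORE_CTRL (or from the PCIe BAR register
 * on the QCA99X0 family).
 */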
852static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
853{
854 u32 val = 0, region = addr & 0xfffff;
855
856 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
857 & 0x7ff) << 21;
858 val |= 0x100000 | region;
859 return val;
860}
861
862
863
864
865
866
867static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
868{
869 u32 val = 0, region = addr & 0xfffff;
870
871 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
872 & 0x7ff) << 21;
873 val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
874 return val;
875}
876
877static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
878{
879 u32 val = 0, region = addr & 0xfffff;
880
881 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
882 val |= 0x100000 | region;
883 return val;
884}
885
886static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
887{
888 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
889
890 if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
891 return -ENOTSUPP;
892
893 return ar_pci->targ_cpu_to_ce_addr(ar, addr);
894}
895
896
897
898
899
900
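/* Read target memory over the diagnostic CE pipe in DIAG_TRANSFER_LIMIT
 * sized chunks. The caller must be able to sleep; concurrent diag access is
 * serialized by ce_diag_mutex.
 */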
901static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
902 int nbytes)
903{
904 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
905 int ret = 0;
906 u32 *buf;
907 unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
908 struct ath10k_ce_pipe *ce_diag;
909
910 u32 ce_data;
911 dma_addr_t ce_data_base = 0;
912 void *data_buf;
913 int i;
914
915 mutex_lock(&ar_pci->ce_diag_mutex);
916 ce_diag = ar_pci->ce_diag;
917
918
919
920
921
922
923
924 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
925
926 data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
927 GFP_ATOMIC);
928 if (!data_buf) {
929 ret = -ENOMEM;
930 goto done;
931 }
932
933
934
935
936
937
938
939
940 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
941
942 remaining_bytes = nbytes;
943 ce_data = ce_data_base;
944 while (remaining_bytes) {
945 nbytes = min_t(unsigned int, remaining_bytes,
946 DIAG_TRANSFER_LIMIT);
947
948 ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
949 if (ret != 0)
950 goto done;
951
952
953 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
954 if (ret)
955 goto done;
956
957 i = 0;
958 while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
959 udelay(DIAG_ACCESS_CE_WAIT_US);
960 i += DIAG_ACCESS_CE_WAIT_US;
961
962 if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
963 ret = -EBUSY;
964 goto done;
965 }
966 }
967
968 i = 0;
969 while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
970 &completed_nbytes) != 0) {
971 udelay(DIAG_ACCESS_CE_WAIT_US);
972 i += DIAG_ACCESS_CE_WAIT_US;
973
974 if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
975 ret = -EBUSY;
976 goto done;
977 }
978 }
979
980 if (nbytes != completed_nbytes) {
981 ret = -EIO;
982 goto done;
983 }
984
985 if (*buf != ce_data) {
986 ret = -EIO;
987 goto done;
988 }
989
990 remaining_bytes -= nbytes;
991 memcpy(data, data_buf, nbytes);
992
993 address += nbytes;
994 data += nbytes;
995 }
996
997done:
998
999 if (data_buf)
1000 dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1001 ce_data_base);
1002
1003 mutex_unlock(&ar_pci->ce_diag_mutex);
1004
1005 return ret;
1006}
1007
1008static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
1009{
1010 __le32 val = 0;
1011 int ret;
1012
1013 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
1014 *value = __le32_to_cpu(val);
1015
1016 return ret;
1017}
1018
1019static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1020 u32 src, u32 len)
1021{
1022 u32 host_addr, addr;
1023 int ret;
1024
1025 host_addr = host_interest_item_address(src);
1026
1027 ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1028 if (ret != 0) {
1029 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
1030 src, ret);
1031 return ret;
1032 }
1033
1034 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1035 if (ret != 0) {
1036 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1037 addr, len, ret);
1038 return ret;
1039 }
1040
1041 return 0;
1042}
1043
1044#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
1045 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
1046
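/* Write host memory into target memory over the diagnostic CE pipe; mirror
 * of ath10k_pci_diag_read_mem() above.
 */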
1047int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1048 const void *data, int nbytes)
1049{
1050 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1051 int ret = 0;
1052 u32 *buf;
1053 unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
1054 struct ath10k_ce_pipe *ce_diag;
1055 void *data_buf;
1056 dma_addr_t ce_data_base = 0;
1057 int i;
1058
1059 mutex_lock(&ar_pci->ce_diag_mutex);
1060 ce_diag = ar_pci->ce_diag;
1061
1062
1063
1064
1065
1066
1067
1068 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
1069
1070 data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
1071 GFP_ATOMIC);
1072 if (!data_buf) {
1073 ret = -ENOMEM;
1074 goto done;
1075 }
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1088
1089 remaining_bytes = nbytes;
1090 while (remaining_bytes) {
1091
1092 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1093
1094
1095 memcpy(data_buf, data, nbytes);
1096
1097
1098 ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
1099 if (ret != 0)
1100 goto done;
1101
1102
1103
1104
1105
1106 ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
1107 if (ret != 0)
1108 goto done;
1109
1110 i = 0;
1111 while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1112 udelay(DIAG_ACCESS_CE_WAIT_US);
1113 i += DIAG_ACCESS_CE_WAIT_US;
1114
1115 if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1116 ret = -EBUSY;
1117 goto done;
1118 }
1119 }
1120
1121 i = 0;
1122 while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1123 &completed_nbytes) != 0) {
1124 udelay(DIAG_ACCESS_CE_WAIT_US);
1125 i += DIAG_ACCESS_CE_WAIT_US;
1126
1127 if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1128 ret = -EBUSY;
1129 goto done;
1130 }
1131 }
1132
1133 if (nbytes != completed_nbytes) {
1134 ret = -EIO;
1135 goto done;
1136 }
1137
1138 if (*buf != address) {
1139 ret = -EIO;
1140 goto done;
1141 }
1142
1143 remaining_bytes -= nbytes;
1144 address += nbytes;
1145 data += nbytes;
1146 }
1147
1148done:
1149 if (data_buf) {
1150 dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1151 ce_data_base);
1152 }
1153
1154 if (ret != 0)
1155 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1156 address, ret);
1157
1158 mutex_unlock(&ar_pci->ce_diag_mutex);
1159
1160 return ret;
1161}
1162
1163static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1164{
1165 __le32 val = __cpu_to_le32(value);
1166
1167 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1168}
1169
1170
1171static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1172{
1173 struct ath10k *ar = ce_state->ar;
1174 struct sk_buff_head list;
1175 struct sk_buff *skb;
1176
1177 __skb_queue_head_init(&list);
1178 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1179
1180 if (skb == NULL)
1181 continue;
1182
1183 __skb_queue_tail(&list, skb);
1184 }
1185
1186 while ((skb = __skb_dequeue(&list)))
1187 ath10k_htc_tx_completion_handler(ar, skb);
1188}
1189
1190static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1191 void (*callback)(struct ath10k *ar,
1192 struct sk_buff *skb))
1193{
1194 struct ath10k *ar = ce_state->ar;
1195 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1196 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1197 struct sk_buff *skb;
1198 struct sk_buff_head list;
1199 void *transfer_context;
1200 unsigned int nbytes, max_nbytes;
1201
1202 __skb_queue_head_init(&list);
1203 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1204 &nbytes) == 0) {
1205 skb = transfer_context;
1206 max_nbytes = skb->len + skb_tailroom(skb);
1207 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1208 max_nbytes, DMA_FROM_DEVICE);
1209
1210 if (unlikely(max_nbytes < nbytes)) {
1211 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1212 nbytes, max_nbytes);
1213 dev_kfree_skb_any(skb);
1214 continue;
1215 }
1216
1217 skb_put(skb, nbytes);
1218 __skb_queue_tail(&list, skb);
1219 }
1220
1221 while ((skb = __skb_dequeue(&list))) {
1222 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1223 ce_state->id, skb->len);
1224 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1225 skb->data, skb->len);
1226
1227 callback(ar, skb);
1228 }
1229
1230 ath10k_pci_rx_post_pipe(pipe_info);
1231}
1232
1233static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1234 void (*callback)(struct ath10k *ar,
1235 struct sk_buff *skb))
1236{
1237 struct ath10k *ar = ce_state->ar;
1238 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1239 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
1240 struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1241 struct sk_buff *skb;
1242 struct sk_buff_head list;
1243 void *transfer_context;
1244 unsigned int nbytes, max_nbytes, nentries;
1245 int orig_len;
1246
1247
1248
1249
1250
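	/* The HTT rx ring is only serviced from this context, so the _nolock
	 * CE helpers are safe here. Buffers are recycled in place: synced back
	 * to the device and re-armed via ath10k_ce_rx_update_write_idx()
	 * instead of being unmapped and reallocated.
	 */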
1251 __skb_queue_head_init(&list);
1252 while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1253 &nbytes) == 0) {
1254 skb = transfer_context;
1255 max_nbytes = skb->len + skb_tailroom(skb);
1256
1257 if (unlikely(max_nbytes < nbytes)) {
1258 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1259 nbytes, max_nbytes);
1260 continue;
1261 }
1262
1263 dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1264 max_nbytes, DMA_FROM_DEVICE);
1265 skb_put(skb, nbytes);
1266 __skb_queue_tail(&list, skb);
1267 }
1268
1269 nentries = skb_queue_len(&list);
1270 while ((skb = __skb_dequeue(&list))) {
1271 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1272 ce_state->id, skb->len);
1273 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1274 skb->data, skb->len);
1275
1276 orig_len = skb->len;
1277 callback(ar, skb);
1278 skb_push(skb, orig_len - skb->len);
1279 skb_reset_tail_pointer(skb);
1280 skb_trim(skb, 0);
1281
1282
1283 dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1284 skb->len + skb_tailroom(skb),
1285 DMA_FROM_DEVICE);
1286 }
1287 ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1288}
1289
1290
1291static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1292{
1293 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1294}
1295
1296static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1297{
1298
1299
1300
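	/* CE4 (host->target HTT) runs with interrupts disabled, so its send
	 * completions are polled here whenever target->host traffic is
	 * processed.
	 */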
1301 ath10k_ce_per_engine_service(ce_state->ar, 4);
1302
1303 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1304}
1305
1306
1307
1308
1309static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1310{
1311 ath10k_pci_process_rx_cb(ce_state,
1312 ath10k_htt_rx_pktlog_completion_handler);
1313}
1314
1315
1316static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1317{
1318 struct ath10k *ar = ce_state->ar;
1319 struct sk_buff *skb;
1320
1321 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1322
1323 if (!skb)
1324 continue;
1325
1326 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1327 skb->len, DMA_TO_DEVICE);
1328 ath10k_htt_hif_tx_complete(ar, skb);
1329 }
1330}
1331
1332static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1333{
1334 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1335 ath10k_htt_t2h_msg_handler(ar, skb);
1336}
1337
1338
1339static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1340{
1341
1342
1343
1344 ath10k_ce_per_engine_service(ce_state->ar, 4);
1345
1346 ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1347}
1348
1349int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1350 struct ath10k_hif_sg_item *items, int n_items)
1351{
1352 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1353 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1354 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1355 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1356 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1357 unsigned int nentries_mask;
1358 unsigned int sw_index;
1359 unsigned int write_index;
1360 int err, i = 0;
1361
1362 spin_lock_bh(&ce->ce_lock);
1363
1364 nentries_mask = src_ring->nentries_mask;
1365 sw_index = src_ring->sw_index;
1366 write_index = src_ring->write_index;
1367
1368 if (unlikely(CE_RING_DELTA(nentries_mask,
1369 write_index, sw_index - 1) < n_items)) {
1370 err = -ENOBUFS;
1371 goto err;
1372 }
1373
1374 for (i = 0; i < n_items - 1; i++) {
1375 ath10k_dbg(ar, ATH10K_DBG_PCI,
1376 "pci tx item %d paddr %pad len %d n_items %d\n",
1377 i, &items[i].paddr, items[i].len, n_items);
1378 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1379 items[i].vaddr, items[i].len);
1380
1381 err = ath10k_ce_send_nolock(ce_pipe,
1382 items[i].transfer_context,
1383 items[i].paddr,
1384 items[i].len,
1385 items[i].transfer_id,
1386 CE_SEND_FLAG_GATHER);
1387 if (err)
1388 goto err;
1389 }
1390
1391
1392
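	/* i == n_items - 1 here: the final item is sent without
	 * CE_SEND_FLAG_GATHER, which closes the gather sequence and lets the
	 * CE report the whole list as one completed transfer.
	 */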
1393 ath10k_dbg(ar, ATH10K_DBG_PCI,
1394 "pci tx item %d paddr %pad len %d n_items %d\n",
1395 i, &items[i].paddr, items[i].len, n_items);
1396 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1397 items[i].vaddr, items[i].len);
1398
1399 err = ath10k_ce_send_nolock(ce_pipe,
1400 items[i].transfer_context,
1401 items[i].paddr,
1402 items[i].len,
1403 items[i].transfer_id,
1404 0);
1405 if (err)
1406 goto err;
1407
1408 spin_unlock_bh(&ce->ce_lock);
1409 return 0;
1410
1411err:
1412 for (; i > 0; i--)
1413 __ath10k_ce_send_revert(ce_pipe);
1414
1415 spin_unlock_bh(&ce->ce_lock);
1416 return err;
1417}
1418
1419int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1420 size_t buf_len)
1421{
1422 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1423}
1424
1425u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1426{
1427 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1428
1429 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1430
1431 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1432}
1433
1434static void ath10k_pci_dump_registers(struct ath10k *ar,
1435 struct ath10k_fw_crash_data *crash_data)
1436{
1437 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1438 int i, ret;
1439
1440 lockdep_assert_held(&ar->dump_mutex);
1441
	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1443 hi_failure_state,
1444 REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1445 if (ret) {
1446 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1447 return;
1448 }
1449
1450 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1451
1452 ath10k_err(ar, "firmware register dump:\n");
1453 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1454 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1455 i,
1456 __le32_to_cpu(reg_dump_values[i]),
1457 __le32_to_cpu(reg_dump_values[i + 1]),
1458 __le32_to_cpu(reg_dump_values[i + 2]),
1459 __le32_to_cpu(reg_dump_values[i + 3]));
1460
1461 if (!crash_data)
1462 return;
1463
1464 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1465 crash_data->registers[i] = reg_dump_values[i];
1466}
1467
1468static int ath10k_pci_dump_memory_section(struct ath10k *ar,
1469 const struct ath10k_mem_region *mem_region,
1470 u8 *buf, size_t buf_len)
1471{
1472 const struct ath10k_mem_section *cur_section, *next_section;
1473 unsigned int count, section_size, skip_size;
1474 int ret, i, j;
1475
1476 if (!mem_region || !buf)
1477 return 0;
1478
1479 cur_section = &mem_region->section_table.sections[0];
1480
1481 if (mem_region->start > cur_section->start) {
1482 ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
1483 mem_region->start, cur_section->start);
1484 return 0;
1485 }
1486
1487 skip_size = cur_section->start - mem_region->start;
1488
1489
1490
1491
1492 for (i = 0; i < skip_size; i++) {
1493 *buf = ATH10K_MAGIC_NOT_COPIED;
1494 buf++;
1495 }
1496
1497 count = 0;
1498
1499 for (i = 0; cur_section != NULL; i++) {
1500 section_size = cur_section->end - cur_section->start;
1501
1502 if (section_size <= 0) {
1503 ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
1504 cur_section->start,
1505 cur_section->end);
1506 break;
1507 }
1508
1509 if ((i + 1) == mem_region->section_table.size) {
1510
1511 next_section = NULL;
1512 skip_size = 0;
1513 } else {
1514 next_section = cur_section + 1;
1515
1516 if (cur_section->end > next_section->start) {
1517 ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
1518 next_section->start,
1519 cur_section->end);
1520 break;
1521 }
1522
1523 skip_size = next_section->start - cur_section->end;
1524 }
1525
1526 if (buf_len < (skip_size + section_size)) {
1527 ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
1528 break;
1529 }
1530
1531 buf_len -= skip_size + section_size;
1532
1533
1534 ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
1535 buf, section_size);
1536 if (ret) {
1537 ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
1538 cur_section->start, ret);
1539 break;
1540 }
1541
1542 buf += section_size;
1543 count += section_size;
1544
1545
1546 for (j = 0; j < skip_size; j++) {
1547 *buf = ATH10K_MAGIC_NOT_COPIED;
1548 buf++;
1549 }
1550
1551 count += skip_size;
1552
1553 if (!next_section)
1554
1555 break;
1556
1557 cur_section = next_section;
1558 }
1559
1560 return count;
1561}
1562
1563static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
1564{
1565 u32 val;
1566
1567 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1568 FW_RAM_CONFIG_ADDRESS, config);
1569
1570 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1571 FW_RAM_CONFIG_ADDRESS);
1572 if (val != config) {
1573 ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
1574 val, config);
1575 return -EIO;
1576 }
1577
1578 return 0;
1579}
1580
1581
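/* Dump IOSRAM indirectly: write the BAR-relative address to the
 * QCA99X0_CPU_MEM_ADDR register and read each 32-bit word back through
 * QCA99X0_CPU_MEM_DATA.
 */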
1582static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
1583 const struct ath10k_mem_region *region,
1584 u8 *buf)
1585{
1586 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1587 u32 base_addr, i;
1588
1589 base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
1590 base_addr += region->start;
1591
1592 for (i = 0; i < region->len; i += 4) {
1593 iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
1594 *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
1595 }
1596
1597 return region->len;
1598}
1599
1600
1601static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1602 const struct ath10k_mem_region *region,
1603 u8 *buf)
1604{
1605 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1606 u32 i;
1607
1608 for (i = 0; i < region->len; i += 4)
1609 *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1610
1611 return region->len;
1612}
1613
1614
1615static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
1616 const struct ath10k_mem_region *current_region,
1617 u8 *buf)
1618{
1619 int ret;
1620
1621 if (current_region->section_table.size > 0)
1622
1623 return ath10k_pci_dump_memory_section(ar,
1624 current_region,
1625 buf,
1626 current_region->len);
1627
1628
1629
1630
1631 ret = ath10k_pci_diag_read_mem(ar,
1632 current_region->start,
1633 buf,
1634 current_region->len);
1635 if (ret) {
1636 ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1637 current_region->name, ret);
1638 return ret;
1639 }
1640
1641 return current_region->len;
1642}
1643
1644static void ath10k_pci_dump_memory(struct ath10k *ar,
1645 struct ath10k_fw_crash_data *crash_data)
1646{
1647 const struct ath10k_hw_mem_layout *mem_layout;
1648 const struct ath10k_mem_region *current_region;
1649 struct ath10k_dump_ram_data_hdr *hdr;
1650 u32 count, shift;
1651 size_t buf_len;
1652 int ret, i;
1653 u8 *buf;
1654
1655 lockdep_assert_held(&ar->dump_mutex);
1656
1657 if (!crash_data)
1658 return;
1659
1660 mem_layout = ath10k_coredump_get_mem_layout(ar);
1661 if (!mem_layout)
1662 return;
1663
1664 current_region = &mem_layout->region_table.regions[0];
1665
1666 buf = crash_data->ramdump_buf;
1667 buf_len = crash_data->ramdump_buf_len;
1668
1669 memset(buf, 0, buf_len);
1670
1671 for (i = 0; i < mem_layout->region_table.size; i++) {
1672 count = 0;
1673
1674 if (current_region->len > buf_len) {
1675 ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
1676 current_region->name,
1677 current_region->len,
1678 buf_len);
1679 break;
1680 }
1681
1682
1683
1684
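		/* IRAM regions are reached through the diag window one 1 MB
		 * bank at a time; select the bank (region start >> 20) via the
		 * FW RAM config register before reading.
		 */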
1685 if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1686 current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
1687 shift = current_region->start >> 20;
1688
1689 ret = ath10k_pci_set_ram_config(ar, shift);
1690 if (ret) {
1691 ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1692 current_region->name, ret);
1693 break;
1694 }
1695 }
1696
1697
1698 hdr = (void *)buf;
1699 buf += sizeof(*hdr);
1700 buf_len -= sizeof(*hdr);
1701
1702 switch (current_region->type) {
1703 case ATH10K_MEM_REGION_TYPE_IOSRAM:
1704 count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1705 break;
1706 case ATH10K_MEM_REGION_TYPE_IOREG:
1707 count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1708 break;
1709 default:
1710 ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1711 if (ret < 0)
1712 break;
1713
1714 count = ret;
1715 break;
1716 }
1717
1718 hdr->region_type = cpu_to_le32(current_region->type);
1719 hdr->start = cpu_to_le32(current_region->start);
1720 hdr->length = cpu_to_le32(count);
1721
1722 if (count == 0)
1723
1724 break;
1725
1726 buf += count;
1727 buf_len -= count;
1728
1729 current_region++;
1730 }
1731}
1732
1733static void ath10k_pci_fw_dump_work(struct work_struct *work)
1734{
1735 struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
1736 dump_work);
1737 struct ath10k_fw_crash_data *crash_data;
1738 struct ath10k *ar = ar_pci->ar;
1739 char guid[UUID_STRING_LEN + 1];
1740
1741 mutex_lock(&ar->dump_mutex);
1742
1743 spin_lock_bh(&ar->data_lock);
1744 ar->stats.fw_crash_counter++;
1745 spin_unlock_bh(&ar->data_lock);
1746
1747 crash_data = ath10k_coredump_new(ar);
1748
1749 if (crash_data)
1750 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1751 else
1752 scnprintf(guid, sizeof(guid), "n/a");
1753
1754 ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1755 ath10k_print_driver_info(ar);
1756 ath10k_pci_dump_registers(ar, crash_data);
1757 ath10k_ce_dump_registers(ar, crash_data);
1758 ath10k_pci_dump_memory(ar, crash_data);
1759
1760 mutex_unlock(&ar->dump_mutex);
1761
1762 queue_work(ar->workqueue, &ar->restart_work);
1763}
1764
1765static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1766{
1767 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1768
1769 queue_work(ar->workqueue, &ar_pci->dump_work);
1770}
1771
1772void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1773 int force)
1774{
1775 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1776
1777 if (!force) {
1778 int resources;
1779
1780
1781
1782
1783
1784
1785
1786 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1787
1788
1789
1790
1791
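		/* Only poll for send completions when the pipe is running low:
		 * skip it while more than half of the source ring entries are
		 * still free.
		 */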
1792 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1793 return;
1794 }
1795 ath10k_ce_per_engine_service(ar, pipe);
1796}
1797
1798static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1799{
1800 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1801
1802 del_timer_sync(&ar_pci->rx_post_retry);
1803}
1804
1805int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1806 u8 *ul_pipe, u8 *dl_pipe)
1807{
1808 const struct service_to_pipe *entry;
1809 bool ul_set = false, dl_set = false;
1810 int i;
1811
1812 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1813
1814 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1815 entry = &target_service_to_ce_map_wlan[i];
1816
1817 if (__le32_to_cpu(entry->service_id) != service_id)
1818 continue;
1819
1820 switch (__le32_to_cpu(entry->pipedir)) {
1821 case PIPEDIR_NONE:
1822 break;
1823 case PIPEDIR_IN:
1824 WARN_ON(dl_set);
1825 *dl_pipe = __le32_to_cpu(entry->pipenum);
1826 dl_set = true;
1827 break;
1828 case PIPEDIR_OUT:
1829 WARN_ON(ul_set);
1830 *ul_pipe = __le32_to_cpu(entry->pipenum);
1831 ul_set = true;
1832 break;
1833 case PIPEDIR_INOUT:
1834 WARN_ON(dl_set);
1835 WARN_ON(ul_set);
1836 *dl_pipe = __le32_to_cpu(entry->pipenum);
1837 *ul_pipe = __le32_to_cpu(entry->pipenum);
1838 dl_set = true;
1839 ul_set = true;
1840 break;
1841 }
1842 }
1843
1844 if (!ul_set || !dl_set)
1845 return -ENOENT;
1846
1847 return 0;
1848}
1849
1850void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1851 u8 *ul_pipe, u8 *dl_pipe)
1852{
1853 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1854
1855 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1856 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1857 ul_pipe, dl_pipe);
1858}
1859
1860void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1861{
1862 u32 val;
1863
1864 switch (ar->hw_rev) {
1865 case ATH10K_HW_QCA988X:
1866 case ATH10K_HW_QCA9887:
1867 case ATH10K_HW_QCA6174:
1868 case ATH10K_HW_QCA9377:
1869 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1870 CORE_CTRL_ADDRESS);
1871 val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1872 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1873 CORE_CTRL_ADDRESS, val);
1874 break;
1875 case ATH10K_HW_QCA99X0:
1876 case ATH10K_HW_QCA9984:
1877 case ATH10K_HW_QCA9888:
1878 case ATH10K_HW_QCA4019:
1879
1880
1881
1882 break;
1883 case ATH10K_HW_WCN3990:
1884 break;
1885 }
1886}
1887
1888static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1889{
1890 u32 val;
1891
1892 switch (ar->hw_rev) {
1893 case ATH10K_HW_QCA988X:
1894 case ATH10K_HW_QCA9887:
1895 case ATH10K_HW_QCA6174:
1896 case ATH10K_HW_QCA9377:
1897 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1898 CORE_CTRL_ADDRESS);
1899 val |= CORE_CTRL_PCIE_REG_31_MASK;
1900 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1901 CORE_CTRL_ADDRESS, val);
1902 break;
1903 case ATH10K_HW_QCA99X0:
1904 case ATH10K_HW_QCA9984:
1905 case ATH10K_HW_QCA9888:
1906 case ATH10K_HW_QCA4019:
1907
1908
1909
1910 break;
1911 case ATH10K_HW_WCN3990:
1912 break;
1913 }
1914}
1915
1916static void ath10k_pci_irq_disable(struct ath10k *ar)
1917{
1918 ath10k_ce_disable_interrupts(ar);
1919 ath10k_pci_disable_and_clear_legacy_irq(ar);
1920 ath10k_pci_irq_msi_fw_mask(ar);
1921}
1922
1923static void ath10k_pci_irq_sync(struct ath10k *ar)
1924{
1925 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1926
1927 synchronize_irq(ar_pci->pdev->irq);
1928}
1929
1930static void ath10k_pci_irq_enable(struct ath10k *ar)
1931{
1932 ath10k_ce_enable_interrupts(ar);
1933 ath10k_pci_enable_legacy_irq(ar);
1934 ath10k_pci_irq_msi_fw_unmask(ar);
1935}
1936
1937static int ath10k_pci_hif_start(struct ath10k *ar)
1938{
1939 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1940
1941 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1942
1943 napi_enable(&ar->napi);
1944
1945 ath10k_pci_irq_enable(ar);
1946 ath10k_pci_rx_post(ar);
1947
1948 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1949 ar_pci->link_ctl);
1950
1951 return 0;
1952}
1953
1954static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1955{
1956 struct ath10k *ar;
1957 struct ath10k_ce_pipe *ce_pipe;
1958 struct ath10k_ce_ring *ce_ring;
1959 struct sk_buff *skb;
1960 int i;
1961
1962 ar = pci_pipe->hif_ce_state;
1963 ce_pipe = pci_pipe->ce_hdl;
1964 ce_ring = ce_pipe->dest_ring;
1965
1966 if (!ce_ring)
1967 return;
1968
1969 if (!pci_pipe->buf_sz)
1970 return;
1971
1972 for (i = 0; i < ce_ring->nentries; i++) {
1973 skb = ce_ring->per_transfer_context[i];
1974 if (!skb)
1975 continue;
1976
1977 ce_ring->per_transfer_context[i] = NULL;
1978
1979 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1980 skb->len + skb_tailroom(skb),
1981 DMA_FROM_DEVICE);
1982 dev_kfree_skb_any(skb);
1983 }
1984}
1985
1986static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1987{
1988 struct ath10k *ar;
1989 struct ath10k_ce_pipe *ce_pipe;
1990 struct ath10k_ce_ring *ce_ring;
1991 struct sk_buff *skb;
1992 int i;
1993
1994 ar = pci_pipe->hif_ce_state;
1995 ce_pipe = pci_pipe->ce_hdl;
1996 ce_ring = ce_pipe->src_ring;
1997
1998 if (!ce_ring)
1999 return;
2000
2001 if (!pci_pipe->buf_sz)
2002 return;
2003
2004 for (i = 0; i < ce_ring->nentries; i++) {
2005 skb = ce_ring->per_transfer_context[i];
2006 if (!skb)
2007 continue;
2008
2009 ce_ring->per_transfer_context[i] = NULL;
2010
2011 ath10k_htc_tx_completion_handler(ar, skb);
2012 }
2013}
2014
2015
2016
2017
2018
2019
2020
2021
2022
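/* Release buffers still sitting on the CE rings at shutdown: pending rx
 * buffers are unmapped and freed, unsent tx buffers are handed back through
 * the HTC tx completion handler.
 */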
2023static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
2024{
2025 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2026 int pipe_num;
2027
2028 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
2029 struct ath10k_pci_pipe *pipe_info;
2030
2031 pipe_info = &ar_pci->pipe_info[pipe_num];
2032 ath10k_pci_rx_pipe_cleanup(pipe_info);
2033 ath10k_pci_tx_pipe_cleanup(pipe_info);
2034 }
2035}
2036
2037void ath10k_pci_ce_deinit(struct ath10k *ar)
2038{
2039 int i;
2040
2041 for (i = 0; i < CE_COUNT; i++)
2042 ath10k_ce_deinit_pipe(ar, i);
2043}
2044
2045void ath10k_pci_flush(struct ath10k *ar)
2046{
2047 ath10k_pci_rx_retry_sync(ar);
2048 ath10k_pci_buffer_cleanup(ar);
2049}
2050
2051static void ath10k_pci_hif_stop(struct ath10k *ar)
2052{
2053 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2054 unsigned long flags;
2055
2056 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
2057
2058 ath10k_pci_irq_disable(ar);
2059 ath10k_pci_irq_sync(ar);
2060 napi_synchronize(&ar->napi);
2061 napi_disable(&ar->napi);
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
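	/* The target may still have the HTT rx ring pointed at host memory and
	 * could DMA into buffers we are about to free; a chip reset is the
	 * only reliable way to quiesce it before the cleanup below.
	 */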
2074 ath10k_pci_safe_chip_reset(ar);
2075
2076 ath10k_pci_flush(ar);
2077
2078 spin_lock_irqsave(&ar_pci->ps_lock, flags);
2079 WARN_ON(ar_pci->ps_wake_refcount > 0);
2080 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
2081}
2082
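/* Exchange a BMI message with the target: the request is copied into
 * DMA-able memory and sent on the BMI tx pipe, an optional response buffer
 * is posted on the BMI rx pipe, and ath10k_pci_bmi_wait() then polls both
 * pipes until completion or timeout.
 */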
2083int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2084 void *req, u32 req_len,
2085 void *resp, u32 *resp_len)
2086{
2087 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2088 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
2089 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
2090 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
2091 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
2092 dma_addr_t req_paddr = 0;
2093 dma_addr_t resp_paddr = 0;
2094 struct bmi_xfer xfer = {};
2095 void *treq, *tresp = NULL;
2096 int ret = 0;
2097
2098 might_sleep();
2099
2100 if (resp && !resp_len)
2101 return -EINVAL;
2102
2103 if (resp && resp_len && *resp_len == 0)
2104 return -EINVAL;
2105
2106 treq = kmemdup(req, req_len, GFP_KERNEL);
2107 if (!treq)
2108 return -ENOMEM;
2109
2110 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2111 ret = dma_mapping_error(ar->dev, req_paddr);
2112 if (ret) {
2113 ret = -EIO;
2114 goto err_dma;
2115 }
2116
2117 if (resp && resp_len) {
2118 tresp = kzalloc(*resp_len, GFP_KERNEL);
2119 if (!tresp) {
2120 ret = -ENOMEM;
2121 goto err_req;
2122 }
2123
2124 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
2125 DMA_FROM_DEVICE);
2126 ret = dma_mapping_error(ar->dev, resp_paddr);
2127 if (ret) {
2128 ret = -EIO;
2129 goto err_req;
2130 }
2131
2132 xfer.wait_for_resp = true;
2133 xfer.resp_len = 0;
2134
2135 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
2136 }
2137
2138 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2139 if (ret)
2140 goto err_resp;
2141
2142 ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
2143 if (ret) {
2144 dma_addr_t unused_buffer;
2145 unsigned int unused_nbytes;
2146 unsigned int unused_id;
2147
2148 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
2149 &unused_nbytes, &unused_id);
2150 } else {
2151
2152 ret = 0;
2153 }
2154
2155err_resp:
2156 if (resp) {
2157 dma_addr_t unused_buffer;
2158
2159 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2160 dma_unmap_single(ar->dev, resp_paddr,
2161 *resp_len, DMA_FROM_DEVICE);
2162 }
2163err_req:
2164 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
2165
2166 if (ret == 0 && resp_len) {
2167 *resp_len = min(*resp_len, xfer.resp_len);
2168 memcpy(resp, tresp, xfer.resp_len);
2169 }
2170err_dma:
2171 kfree(treq);
2172 kfree(tresp);
2173
2174 return ret;
2175}
2176
2177static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
2178{
2179 struct bmi_xfer *xfer;
2180
2181 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2182 return;
2183
2184 xfer->tx_done = true;
2185}
2186
2187static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
2188{
2189 struct ath10k *ar = ce_state->ar;
2190 struct bmi_xfer *xfer;
2191 unsigned int nbytes;
2192
2193 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2194 &nbytes))
2195 return;
2196
2197 if (WARN_ON_ONCE(!xfer))
2198 return;
2199
2200 if (!xfer->wait_for_resp) {
2201 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
2202 return;
2203 }
2204
2205 xfer->resp_len = nbytes;
2206 xfer->rx_done = true;
2207}
2208
2209static int ath10k_pci_bmi_wait(struct ath10k *ar,
2210 struct ath10k_ce_pipe *tx_pipe,
2211 struct ath10k_ce_pipe *rx_pipe,
2212 struct bmi_xfer *xfer)
2213{
2214 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
2215 unsigned long started = jiffies;
2216 unsigned long dur;
2217 int ret;
2218
2219 while (time_before_eq(jiffies, timeout)) {
2220 ath10k_pci_bmi_send_done(tx_pipe);
2221 ath10k_pci_bmi_recv_data(rx_pipe);
2222
2223 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
2224 ret = 0;
2225 goto out;
2226 }
2227
2228 schedule();
2229 }
2230
2231 ret = -ETIMEDOUT;
2232
2233out:
2234 dur = jiffies - started;
2235 if (dur > HZ)
2236 ath10k_dbg(ar, ATH10K_DBG_BMI,
2237 "bmi cmd took %lu jiffies hz %d ret %d\n",
2238 dur, HZ, ret);
2239 return ret;
2240}
2241
2242
2243
2244
2245
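/* Poke the target CPU by setting the CPU interrupt bit in CORE_CTRL so it
 * gets a chance to notice any state the host has just changed.
 */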
2246static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2247{
2248 u32 addr, val;
2249
2250 addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
2251 val = ath10k_pci_read32(ar, addr);
2252 val |= CORE_CTRL_CPU_INTR_MASK;
2253 ath10k_pci_write32(ar, addr, val);
2254
2255 return 0;
2256}
2257
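/* Number of IRAM banks the firmware is allowed to claim through
 * hi_early_alloc; varies per chip and, for QCA6174, per chip revision.
 */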
2258static int ath10k_pci_get_num_banks(struct ath10k *ar)
2259{
2260 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2261
2262 switch (ar_pci->pdev->device) {
2263 case QCA988X_2_0_DEVICE_ID_UBNT:
2264 case QCA988X_2_0_DEVICE_ID:
2265 case QCA99X0_2_0_DEVICE_ID:
2266 case QCA9888_2_0_DEVICE_ID:
2267 case QCA9984_1_0_DEVICE_ID:
2268 case QCA9887_1_0_DEVICE_ID:
2269 return 1;
2270 case QCA6164_2_1_DEVICE_ID:
2271 case QCA6174_2_1_DEVICE_ID:
2272 switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
2273 case QCA6174_HW_1_0_CHIP_ID_REV:
2274 case QCA6174_HW_1_1_CHIP_ID_REV:
2275 case QCA6174_HW_2_1_CHIP_ID_REV:
2276 case QCA6174_HW_2_2_CHIP_ID_REV:
2277 return 3;
2278 case QCA6174_HW_1_3_CHIP_ID_REV:
2279 return 2;
2280 case QCA6174_HW_3_0_CHIP_ID_REV:
2281 case QCA6174_HW_3_1_CHIP_ID_REV:
2282 case QCA6174_HW_3_2_CHIP_ID_REV:
2283 return 9;
2284 }
2285 break;
2286 case QCA9377_1_0_DEVICE_ID:
2287 return 9;
2288 }
2289
2290 ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2291 return 1;
2292}
2293
2294static int ath10k_bus_get_num_banks(struct ath10k *ar)
2295{
2296 struct ath10k_ce *ce = ath10k_ce_priv(ar);
2297
2298 return ce->bus_ops->get_num_banks(ar);
2299}
2300
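/* Early target configuration done over the diagnostic window before the
 * firmware finishes booting: download the CE pipe configuration and the
 * service-to-pipe map, disable PCIe L1, program the early allocation
 * (IRAM banks) value and finally flag the early configuration as done.
 */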
2301int ath10k_pci_init_config(struct ath10k *ar)
2302{
2303 u32 interconnect_targ_addr;
2304 u32 pcie_state_targ_addr = 0;
2305 u32 pipe_cfg_targ_addr = 0;
2306 u32 svc_to_pipe_map = 0;
2307 u32 pcie_config_flags = 0;
2308 u32 ealloc_value;
2309 u32 ealloc_targ_addr;
2310 u32 flag2_value;
2311 u32 flag2_targ_addr;
2312 int ret = 0;
2313
2314
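 /* Fetch the target-side address of the PCIe state structure from the
 * hi_interconnect_state host interest item.
 */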
2315 interconnect_targ_addr =
2316 host_interest_item_address(HI_ITEM(hi_interconnect_state));
2317
2318
2319 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2320 &pcie_state_targ_addr);
2321 if (ret != 0) {
2322 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2323 return ret;
2324 }
2325
2326 if (pcie_state_targ_addr == 0) {
2327 ret = -EIO;
2328 ath10k_err(ar, "Invalid pcie state addr\n");
2329 return ret;
2330 }
2331
2332 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2333 offsetof(struct pcie_state,
2334 pipe_cfg_addr)),
2335 &pipe_cfg_targ_addr);
2336 if (ret != 0) {
2337 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2338 return ret;
2339 }
2340
2341 if (pipe_cfg_targ_addr == 0) {
2342 ret = -EIO;
2343 ath10k_err(ar, "Invalid pipe cfg addr\n");
2344 return ret;
2345 }
2346
2347 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2348 target_ce_config_wlan,
2349 sizeof(struct ce_pipe_config) *
2350 NUM_TARGET_CE_CONFIG_WLAN);
2351
2352 if (ret != 0) {
2353 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2354 return ret;
2355 }
2356
2357 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2358 offsetof(struct pcie_state,
2359 svc_to_pipe_map)),
2360 &svc_to_pipe_map);
2361 if (ret != 0) {
2362 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2363 return ret;
2364 }
2365
2366 if (svc_to_pipe_map == 0) {
2367 ret = -EIO;
2368 ath10k_err(ar, "Invalid svc_to_pipe map\n");
2369 return ret;
2370 }
2371
2372 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2373 target_service_to_ce_map_wlan,
2374 sizeof(target_service_to_ce_map_wlan));
2375 if (ret != 0) {
2376 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2377 return ret;
2378 }
2379
2380 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2381 offsetof(struct pcie_state,
2382 config_flags)),
2383 &pcie_config_flags);
2384 if (ret != 0) {
2385 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2386 return ret;
2387 }
2388
2389 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2390
2391 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2392 offsetof(struct pcie_state,
2393 config_flags)),
2394 pcie_config_flags);
2395 if (ret != 0) {
2396 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2397 return ret;
2398 }
2399
2400
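 /* Configure the target's early memory allocation: write the magic value
 * and the number of IRAM banks the host expects.
 */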
2401 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2402
2403 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2404 if (ret != 0) {
2405 ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2406 return ret;
2407 }
2408
2409
2410 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2411 HI_EARLY_ALLOC_MAGIC_MASK);
2412 ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2413 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2414 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2415
2416 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2417 if (ret != 0) {
2418 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2419 return ret;
2420 }
2421
2422
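 /* Tell the target that early configuration is complete. */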
2423 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2424
2425 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2426 if (ret != 0) {
2427 ath10k_err(ar, "Failed to get option val: %d\n", ret);
2428 return ret;
2429 }
2430
2431 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2432
2433 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2434 if (ret != 0) {
2435 ath10k_err(ar, "Failed to set option val: %d\n", ret);
2436 return ret;
2437 }
2438
2439 return 0;
2440}
2441
2442static void ath10k_pci_override_ce_config(struct ath10k *ar)
2443{
2444 struct ce_attr *attr;
2445 struct ce_pipe_config *config;
2446
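 /* On QCA6174/QCA9377 copy engine 5 is not used for firmware-to-host
 * traffic, so repurpose it: disable the host-side receive ring and turn
 * the target-side pipe into a host->target (OUT) pipe.
 */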
2452 attr = &host_ce_config_wlan[5];
2453 attr->src_sz_max = 0;
2454 attr->dest_nentries = 0;
2455
2456
2457 config = &target_ce_config_wlan[5];
2458 config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2459 config->nbytes_max = __cpu_to_le32(2048);
2460
2461
2462 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2463}
2464
2465int ath10k_pci_alloc_pipes(struct ath10k *ar)
2466{
2467 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2468 struct ath10k_pci_pipe *pipe;
2469 struct ath10k_ce *ce = ath10k_ce_priv(ar);
2470 int i, ret;
2471
2472 for (i = 0; i < CE_COUNT; i++) {
2473 pipe = &ar_pci->pipe_info[i];
2474 pipe->ce_hdl = &ce->ce_states[i];
2475 pipe->pipe_num = i;
2476 pipe->hif_ce_state = ar;
2477
2478 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
2479 if (ret) {
2480 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2481 i, ret);
2482 return ret;
2483 }
2484
2485
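 /* The last copy engine is reserved for diagnostic accesses and is not
 * exposed as a HIF pipe.
 */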
2486 if (i == CE_DIAG_PIPE) {
2487 ar_pci->ce_diag = pipe->ce_hdl;
2488 continue;
2489 }
2490
2491 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
2492 }
2493
2494 return 0;
2495}
2496
2497void ath10k_pci_free_pipes(struct ath10k *ar)
2498{
2499 int i;
2500
2501 for (i = 0; i < CE_COUNT; i++)
2502 ath10k_ce_free_pipe(ar, i);
2503}
2504
2505int ath10k_pci_init_pipes(struct ath10k *ar)
2506{
2507 int i, ret;
2508
2509 for (i = 0; i < CE_COUNT; i++) {
2510 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
2511 if (ret) {
2512 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2513 i, ret);
2514 return ret;
2515 }
2516 }
2517
2518 return 0;
2519}
2520
2521static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2522{
2523 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2524 FW_IND_EVENT_PENDING;
2525}
2526
2527static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2528{
2529 u32 val;
2530
2531 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2532 val &= ~FW_IND_EVENT_PENDING;
2533 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2534}
2535
2536static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2537{
2538 u32 val;
2539
2540 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2541 return (val == 0xffffffff);
2542}
2543
2544
2545static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2546{
2547 u32 val;
2548
2549 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2550 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2551 val | SOC_RESET_CONTROL_SI0_RST_MASK);
2552 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2553
2554 msleep(10);
2555
2556 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2557 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2558 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2559 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2560
2561 msleep(10);
2562}
2563
2564static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2565{
2566 u32 val;
2567
2568 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2569
2570 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2571 SOC_RESET_CONTROL_ADDRESS);
2572 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2573 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2574}
2575
2576static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2577{
2578 u32 val;
2579
2580 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2581 SOC_RESET_CONTROL_ADDRESS);
2582
2583 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2584 val | SOC_RESET_CONTROL_CE_RST_MASK);
2585 msleep(10);
2586 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2587 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2588}
2589
2590static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2591{
2592 u32 val;
2593
2594 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2595 SOC_LF_TIMER_CONTROL0_ADDRESS);
2596 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2597 SOC_LF_TIMER_CONTROL0_ADDRESS,
2598 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2599}
2600
2601static int ath10k_pci_warm_reset(struct ath10k *ar)
2602{
2603 int ret;
2604
2605 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2606
2607 spin_lock_bh(&ar->data_lock);
2608 ar->stats.fw_warm_reset_counter++;
2609 spin_unlock_bh(&ar->data_lock);
2610
2611 ath10k_pci_irq_disable(ar);
2612
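 /* Quiesce the target CPU before resetting the copy engines; letting it
 * access a copy engine while the host resets it can wedge the PCIe
 * bus and hang the host.
 */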
2618 ath10k_pci_warm_reset_si0(ar);
2619 ath10k_pci_warm_reset_cpu(ar);
2620 ath10k_pci_init_pipes(ar);
2621 ath10k_pci_wait_for_target_init(ar);
2622
2623 ath10k_pci_warm_reset_clear_lf(ar);
2624 ath10k_pci_warm_reset_ce(ar);
2625 ath10k_pci_warm_reset_cpu(ar);
2626 ath10k_pci_init_pipes(ar);
2627
2628 ret = ath10k_pci_wait_for_target_init(ar);
2629 if (ret) {
2630 ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2631 return ret;
2632 }
2633
2634 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2635
2636 return 0;
2637}
2638
2639static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2640{
2641 ath10k_pci_irq_disable(ar);
2642 return ath10k_pci_qca99x0_chip_reset(ar);
2643}
2644
2645static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2646{
2647 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2648
2649 if (!ar_pci->pci_soft_reset)
2650 return -ENOTSUPP;
2651
2652 return ar_pci->pci_soft_reset(ar);
2653}
2654
2655static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2656{
2657 int i, ret;
2658 u32 val;
2659
2660 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2661
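 /* Warm reset does not always succeed on the first try, so retry a few
 * times before falling back to a cold reset.
 */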
2669 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2670 ret = ath10k_pci_warm_reset(ar);
2671 if (ret) {
2672 ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2673 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2674 ret);
2675 continue;
2676 }
2677
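 /* Verify that the device actually recovered: re-init the copy engine
 * pipes and read a host interest register through the diagnostic
 * window. If either fails, try the warm reset again.
 */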
2687 ret = ath10k_pci_init_pipes(ar);
2688 if (ret) {
2689 ath10k_warn(ar, "failed to init copy engine: %d\n",
2690 ret);
2691 continue;
2692 }
2693
2694 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2695 &val);
2696 if (ret) {
2697 ath10k_warn(ar, "failed to poke copy engine: %d\n",
2698 ret);
2699 continue;
2700 }
2701
2702 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2703 return 0;
2704 }
2705
2706 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2707 ath10k_warn(ar, "refusing cold reset as requested\n");
2708 return -EPERM;
2709 }
2710
2711 ret = ath10k_pci_cold_reset(ar);
2712 if (ret) {
2713 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2714 return ret;
2715 }
2716
2717 ret = ath10k_pci_wait_for_target_init(ar);
2718 if (ret) {
2719 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2720 ret);
2721 return ret;
2722 }
2723
2724 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2725
2726 return 0;
2727}
2728
2729static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2730{
2731 int ret;
2732
2733 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2734
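 /* QCA6174 needs a cold reset followed by a warm reset to boot reliably. */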
2737 ret = ath10k_pci_cold_reset(ar);
2738 if (ret) {
2739 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2740 return ret;
2741 }
2742
2743 ret = ath10k_pci_wait_for_target_init(ar);
2744 if (ret) {
2745 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2746 ret);
2747 return ret;
2748 }
2749
2750 ret = ath10k_pci_warm_reset(ar);
2751 if (ret) {
2752 ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2753 return ret;
2754 }
2755
2756 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2757
2758 return 0;
2759}
2760
2761static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2762{
2763 int ret;
2764
2765 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2766
2767 ret = ath10k_pci_cold_reset(ar);
2768 if (ret) {
2769 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2770 return ret;
2771 }
2772
2773 ret = ath10k_pci_wait_for_target_init(ar);
2774 if (ret) {
2775 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2776 ret);
2777 return ret;
2778 }
2779
2780 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2781
2782 return 0;
2783}
2784
2785static int ath10k_pci_chip_reset(struct ath10k *ar)
2786{
2787 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2788
2789 if (WARN_ON(!ar_pci->pci_hard_reset))
2790 return -ENOTSUPP;
2791
2792 return ar_pci->pci_hard_reset(ar);
2793}
2794
2795static int ath10k_pci_hif_power_up(struct ath10k *ar,
2796 enum ath10k_firmware_mode fw_mode)
2797{
2798 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2799 int ret;
2800
2801 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2802
2803 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2804 &ar_pci->link_ctl);
2805 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2806 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2807
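 /* Reset the chip before continuing: the target may still be powered
 * and running firmware from a previous driver instance, so bring it
 * into a known state before BMI configuration.
 */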
2818 ret = ath10k_pci_chip_reset(ar);
2819 if (ret) {
2820 if (ath10k_pci_has_fw_crashed(ar)) {
2821 ath10k_warn(ar, "firmware crashed during chip reset\n");
2822 ath10k_pci_fw_crashed_clear(ar);
2823 ath10k_pci_fw_crashed_dump(ar);
2824 }
2825
2826 ath10k_err(ar, "failed to reset chip: %d\n", ret);
2827 goto err_sleep;
2828 }
2829
2830 ret = ath10k_pci_init_pipes(ar);
2831 if (ret) {
2832 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2833 goto err_sleep;
2834 }
2835
2836 ret = ath10k_pci_init_config(ar);
2837 if (ret) {
2838 ath10k_err(ar, "failed to setup init config: %d\n", ret);
2839 goto err_ce;
2840 }
2841
2842 ret = ath10k_pci_wake_target_cpu(ar);
2843 if (ret) {
2844 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2845 goto err_ce;
2846 }
2847
2848 return 0;
2849
2850err_ce:
2851 ath10k_pci_ce_deinit(ar);
2852
2853err_sleep:
2854 return ret;
2855}
2856
2857void ath10k_pci_hif_power_down(struct ath10k *ar)
2858{
2859 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2860
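 /* Nothing to do here: hif_power_up performs a full chip reset and
 * hif_stop resets the chip as well, so no extra reset is needed on
 * power down.
 */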
2864}
2865
2866static int ath10k_pci_hif_suspend(struct ath10k *ar)
2867{
2868
2869 return 0;
2870}
2871
2872static int ath10k_pci_suspend(struct ath10k *ar)
2873{
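 /* The device may still be awake (the power save grace timer can still
 * be running), so make sure it is put to sleep before the bus suspends.
 */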
2879 ath10k_pci_sleep_sync(ar);
2880
2881 return 0;
2882}
2883
2884static int ath10k_pci_hif_resume(struct ath10k *ar)
2885{
2886
2887 return 0;
2888}
2889
2890static int ath10k_pci_resume(struct ath10k *ar)
2891{
2892 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2893 struct pci_dev *pdev = ar_pci->pdev;
2894 u32 val;
2895 int ret = 0;
2896
2897 ret = ath10k_pci_force_wake(ar);
2898 if (ret) {
2899 ath10k_err(ar, "failed to wake up target: %d\n", ret);
2900 return ret;
2901 }
2902
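 /* Suspend/resume resets the PCI configuration space, so re-disable the
 * RETRY_TIMEOUT register (0x41) to keep PCI Tx retries from interfering
 * with C3 CPU state. pci_restore_state() does not help here since it
 * only restores the first 64 bytes of the config header.
 */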
2908 pci_read_config_dword(pdev, 0x40, &val);
2909 if ((val & 0x0000ff00) != 0)
2910 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2911
2912 return ret;
2913}
2914
2915static bool ath10k_pci_validate_cal(void *data, size_t size)
2916{
2917 __le16 *cal_words = data;
2918 u16 checksum = 0;
2919 size_t i;
2920
2921 if (size % 2 != 0)
2922 return false;
2923
2924 for (i = 0; i < size / 2; i++)
2925 checksum ^= le16_to_cpu(cal_words[i]);
2926
2927 return checksum == 0xffff;
2928}
2929
2930static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2931{
2932
2933 ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2934
2935
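 /* Configure the I2C SDA and SI clock GPIO pins used to reach the
 * EEPROM.
 */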
2936 ath10k_pci_write32(ar,
2937 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2938 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2939 SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2940 GPIO_PIN0_CONFIG) |
2941 SM(1, GPIO_PIN0_PAD_PULL));
2942
2943 ath10k_pci_write32(ar,
2944 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2945 4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2946 SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2947 SM(1, GPIO_PIN0_PAD_PULL));
2948
2949 ath10k_pci_write32(ar,
2950 GPIO_BASE_ADDRESS +
2951 QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2952 1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2953
2954
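 /* Configure the serial interface for I2C: open-drain bidirectional
 * data, positive-edge sampling and a clock divider of 8.
 */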
2955 ath10k_pci_write32(ar,
2956 SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2957 SM(1, SI_CONFIG_ERR_INT) |
2958 SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2959 SM(1, SI_CONFIG_I2C) |
2960 SM(1, SI_CONFIG_POS_SAMPLE) |
2961 SM(1, SI_CONFIG_INACTIVE_DATA) |
2962 SM(1, SI_CONFIG_INACTIVE_CLK) |
2963 SM(8, SI_CONFIG_DIVIDER));
2964}
2965
2966static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2967{
2968 u32 reg;
2969 int wait_limit;
2970
2971
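 /* Set up a read transaction for the requested EEPROM address. */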
2972 reg = QCA9887_EEPROM_SELECT_READ |
2973 SM(addr, QCA9887_EEPROM_ADDR_LO) |
2974 SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2975 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2976
2977
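 /* Start the transfer: four bytes out, one byte in. */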
2978 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
2979 SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
2980 SM(4, SI_CS_TX_CNT));
2981
2982
2983 wait_limit = 100000;
2984
2985
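 /* Poll for SI_CS_DONE_INT for up to ~1 second (100000 * 10us). */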
2986 do {
2987 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
2988 if (MS(reg, SI_CS_DONE_INT))
2989 break;
2990
2991 wait_limit--;
2992 udelay(10);
2993 } while (wait_limit > 0);
2994
2995 if (!MS(reg, SI_CS_DONE_INT)) {
2996 ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
2997 addr);
2998 return -ETIMEDOUT;
2999 }
3000
3001
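 /* Clear the completion (and any error) status. */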
3002 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
3003
3004 if (MS(reg, SI_CS_DONE_ERR)) {
3005 ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
3006 return -EIO;
3007 }
3008
3009
3010 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
3011 *out = reg;
3012
3013 return 0;
3014}
3015
3016static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
3017 size_t *data_len)
3018{
3019 u8 *caldata = NULL;
3020 size_t calsize, i;
3021 int ret;
3022
3023 if (!QCA_REV_9887(ar))
3024 return -EOPNOTSUPP;
3025
3026 calsize = ar->hw_params.cal_data_len;
3027 caldata = kmalloc(calsize, GFP_KERNEL);
3028 if (!caldata)
3029 return -ENOMEM;
3030
3031 ath10k_pci_enable_eeprom(ar);
3032
3033 for (i = 0; i < calsize; i++) {
3034 ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
3035 if (ret)
3036 goto err_free;
3037 }
3038
3039 if (!ath10k_pci_validate_cal(caldata, calsize))
3040 goto err_free;
3041
3042 *data = caldata;
3043 *data_len = calsize;
3044
3045 return 0;
3046
3047err_free:
3048 kfree(caldata);
3049
3050 return -EINVAL;
3051}
3052
3053static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
3054 .tx_sg = ath10k_pci_hif_tx_sg,
3055 .diag_read = ath10k_pci_hif_diag_read,
3056 .diag_write = ath10k_pci_diag_write_mem,
3057 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
3058 .start = ath10k_pci_hif_start,
3059 .stop = ath10k_pci_hif_stop,
3060 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
3061 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
3062 .send_complete_check = ath10k_pci_hif_send_complete_check,
3063 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
3064 .power_up = ath10k_pci_hif_power_up,
3065 .power_down = ath10k_pci_hif_power_down,
3066 .read32 = ath10k_pci_read32,
3067 .write32 = ath10k_pci_write32,
3068 .suspend = ath10k_pci_hif_suspend,
3069 .resume = ath10k_pci_hif_resume,
3070 .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
3071};
3072
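/* Top-level interrupt handler for all PCI interrupts from the target. It is
 * used for both legacy INTx and MSI delivery; the actual servicing is
 * deferred to NAPI.
 */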
3078static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
3079{
3080 struct ath10k *ar = arg;
3081 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3082 int ret;
3083
3084 if (ath10k_pci_has_device_gone(ar))
3085 return IRQ_NONE;
3086
3087 ret = ath10k_pci_force_wake(ar);
3088 if (ret) {
3089 ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
3090 return IRQ_NONE;
3091 }
3092
3093 if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
3094 !ath10k_pci_irq_pending(ar))
3095 return IRQ_NONE;
3096
3097 ath10k_pci_disable_and_clear_legacy_irq(ar);
3098 ath10k_pci_irq_msi_fw_mask(ar);
3099 napi_schedule(&ar->napi);
3100
3101 return IRQ_HANDLED;
3102}
3103
3104static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
3105{
3106 struct ath10k *ar = container_of(ctx, struct ath10k, napi);
3107 int done = 0;
3108
3109 if (ath10k_pci_has_fw_crashed(ar)) {
3110 ath10k_pci_fw_crashed_clear(ar);
3111 ath10k_pci_fw_crashed_dump(ar);
3112 napi_complete(ctx);
3113 return done;
3114 }
3115
3116 ath10k_ce_per_engine_service_any(ar);
3117
3118 done = ath10k_htt_txrx_compl_task(ar, budget);
3119
3120 if (done < budget) {
3121 napi_complete_done(ctx, done);
3122
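 /* With MSI, interrupts may arrive while the NAPI poll is already in
 * progress and would otherwise go unserviced once the poll completes.
 * Check for pending copy engine interrupts before re-enabling
 * interrupts and reschedule if any are found.
 */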
3130 if (ath10k_ce_interrupt_summary(ar)) {
3131 napi_reschedule(ctx);
3132 goto out;
3133 }
3134 ath10k_pci_enable_legacy_irq(ar);
3135 ath10k_pci_irq_msi_fw_unmask(ar);
3136 }
3137
3138out:
3139 return done;
3140}
3141
3142static int ath10k_pci_request_irq_msi(struct ath10k *ar)
3143{
3144 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3145 int ret;
3146
3147 ret = request_irq(ar_pci->pdev->irq,
3148 ath10k_pci_interrupt_handler,
3149 IRQF_SHARED, "ath10k_pci", ar);
3150 if (ret) {
3151 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3152 ar_pci->pdev->irq, ret);
3153 return ret;
3154 }
3155
3156 return 0;
3157}
3158
3159static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
3160{
3161 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3162 int ret;
3163
3164 ret = request_irq(ar_pci->pdev->irq,
3165 ath10k_pci_interrupt_handler,
3166 IRQF_SHARED, "ath10k_pci", ar);
3167 if (ret) {
3168 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3169 ar_pci->pdev->irq, ret);
3170 return ret;
3171 }
3172
3173 return 0;
3174}
3175
3176static int ath10k_pci_request_irq(struct ath10k *ar)
3177{
3178 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3179
3180 switch (ar_pci->oper_irq_mode) {
3181 case ATH10K_PCI_IRQ_LEGACY:
3182 return ath10k_pci_request_irq_legacy(ar);
3183 case ATH10K_PCI_IRQ_MSI:
3184 return ath10k_pci_request_irq_msi(ar);
3185 default:
3186 return -EINVAL;
3187 }
3188}
3189
3190static void ath10k_pci_free_irq(struct ath10k *ar)
3191{
3192 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3193
3194 free_irq(ar_pci->pdev->irq, ar);
3195}
3196
3197void ath10k_pci_init_napi(struct ath10k *ar)
3198{
3199 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
3200 ATH10K_NAPI_BUDGET);
3201}
3202
3203static int ath10k_pci_init_irq(struct ath10k *ar)
3204{
3205 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3206 int ret;
3207
3208 ath10k_pci_init_napi(ar);
3209
3210 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
3211 ath10k_info(ar, "limiting irq mode to: %d\n",
3212 ath10k_pci_irq_mode);
3213
3214
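 /* Try MSI first unless the user forced legacy interrupts. */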
3215 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
3216 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
3217 ret = pci_enable_msi(ar_pci->pdev);
3218 if (ret == 0)
3219 return 0;
3220
3221
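 /* MSI allocation failed, fall back to legacy interrupts. */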
3222 }
3223
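 /* Fall back to legacy interrupts. The PCIE_INTR_ENABLE write below may
 * race with the target still coming up, so the enable is repeated later
 * while waiting for target initialisation.
 */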
3233 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
3234
3235 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3236 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3237
3238 return 0;
3239}
3240
3241static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
3242{
3243 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3244 0);
3245}
3246
3247static int ath10k_pci_deinit_irq(struct ath10k *ar)
3248{
3249 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3250
3251 switch (ar_pci->oper_irq_mode) {
3252 case ATH10K_PCI_IRQ_LEGACY:
3253 ath10k_pci_deinit_irq_legacy(ar);
3254 break;
3255 default:
3256 pci_disable_msi(ar_pci->pdev);
3257 break;
3258 }
3259
3260 return 0;
3261}
3262
3263int ath10k_pci_wait_for_target_init(struct ath10k *ar)
3264{
3265 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3266 unsigned long timeout;
3267 u32 val;
3268
3269 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
3270
3271 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3272
3273 do {
3274 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3275
3276 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3277 val);
3278
3279
3280 if (val == 0xffffffff)
3281 continue;
3282
3283
3284 if (val & FW_IND_EVENT_PENDING)
3285 break;
3286
3287 if (val & FW_IND_INITIALIZED)
3288 break;
3289
3290 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
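 /* Fix a potential race by repeating the interrupt enable write. */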
3292 ath10k_pci_enable_legacy_irq(ar);
3293
3294 mdelay(10);
3295 } while (time_before(jiffies, timeout));
3296
3297 ath10k_pci_disable_and_clear_legacy_irq(ar);
3298 ath10k_pci_irq_msi_fw_mask(ar);
3299
3300 if (val == 0xffffffff) {
3301 ath10k_err(ar, "failed to read device register, device is gone\n");
3302 return -EIO;
3303 }
3304
3305 if (val & FW_IND_EVENT_PENDING) {
3306 ath10k_warn(ar, "device has crashed during init\n");
3307 return -ECOMM;
3308 }
3309
3310 if (!(val & FW_IND_INITIALIZED)) {
3311 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
3312 val);
3313 return -ETIMEDOUT;
3314 }
3315
3316 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
3317 return 0;
3318}
3319
3320static int ath10k_pci_cold_reset(struct ath10k *ar)
3321{
3322 u32 val;
3323
3324 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3325
3326 spin_lock_bh(&ar->data_lock);
3327
3328 ar->stats.fw_cold_reset_counter++;
3329
3330 spin_unlock_bh(&ar->data_lock);
3331
3332
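 /* Put the target, including PCIe, into reset. */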
3333 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3334 val |= 1;
3335 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3336
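 /* The PCIe link may be unstable right after toggling the global reset,
 * so give the device some time before the next register access.
 */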
3342 msleep(20);
3343
3344
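 /* Pull the target, including PCIe, out of reset. */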
3345 val &= ~1;
3346 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3347
3348 msleep(20);
3349
3350 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3351
3352 return 0;
3353}
3354
3355static int ath10k_pci_claim(struct ath10k *ar)
3356{
3357 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3358 struct pci_dev *pdev = ar_pci->pdev;
3359 int ret;
3360
3361 pci_set_drvdata(pdev, ar);
3362
3363 ret = pci_enable_device(pdev);
3364 if (ret) {
3365 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3366 return ret;
3367 }
3368
3369 ret = pci_request_region(pdev, BAR_NUM, "ath");
3370 if (ret) {
3371 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
3372 ret);
3373 goto err_device;
3374 }
3375
3376
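 /* The target expects 32-bit DMA addressing; enforce it. */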
3377 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3378 if (ret) {
3379 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3380 goto err_region;
3381 }
3382
3383 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3384 if (ret) {
3385 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
3386 ret);
3387 goto err_region;
3388 }
3389
3390 pci_set_master(pdev);
3391
3392
3393 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
3394 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3395 if (!ar_pci->mem) {
3396 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3397 ret = -EIO;
3398 goto err_master;
3399 }
3400
3401 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
3402 return 0;
3403
3404err_master:
3405 pci_clear_master(pdev);
3406
3407err_region:
3408 pci_release_region(pdev, BAR_NUM);
3409
3410err_device:
3411 pci_disable_device(pdev);
3412
3413 return ret;
3414}
3415
3416static void ath10k_pci_release(struct ath10k *ar)
3417{
3418 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3419 struct pci_dev *pdev = ar_pci->pdev;
3420
3421 pci_iounmap(pdev, ar_pci->mem);
3422 pci_release_region(pdev, BAR_NUM);
3423 pci_clear_master(pdev);
3424 pci_disable_device(pdev);
3425}
3426
3427static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3428{
3429 const struct ath10k_pci_supp_chip *supp_chip;
3430 int i;
3431 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3432
3433 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3434 supp_chip = &ath10k_pci_supp_chips[i];
3435
3436 if (supp_chip->dev_id == dev_id &&
3437 supp_chip->rev_id == rev_id)
3438 return true;
3439 }
3440
3441 return false;
3442}
3443
3444int ath10k_pci_setup_resource(struct ath10k *ar)
3445{
3446 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3447 struct ath10k_ce *ce = ath10k_ce_priv(ar);
3448 int ret;
3449
3450 spin_lock_init(&ce->ce_lock);
3451 spin_lock_init(&ar_pci->ps_lock);
3452 mutex_init(&ar_pci->ce_diag_mutex);
3453
3454 INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
3455
3456 timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3457
3458 if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
3459 ath10k_pci_override_ce_config(ar);
3460
3461 ret = ath10k_pci_alloc_pipes(ar);
3462 if (ret) {
3463 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3464 ret);
3465 return ret;
3466 }
3467
3468 return 0;
3469}
3470
3471void ath10k_pci_release_resource(struct ath10k *ar)
3472{
3473 ath10k_pci_rx_retry_sync(ar);
3474 netif_napi_del(&ar->napi);
3475 ath10k_pci_ce_deinit(ar);
3476 ath10k_pci_free_pipes(ar);
3477}
3478
3479static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3480 .read32 = ath10k_bus_pci_read32,
3481 .write32 = ath10k_bus_pci_write32,
3482 .get_num_banks = ath10k_pci_get_num_banks,
3483};
3484
3485static int ath10k_pci_probe(struct pci_dev *pdev,
3486 const struct pci_device_id *pci_dev)
3487{
3488 int ret = 0;
3489 struct ath10k *ar;
3490 struct ath10k_pci *ar_pci;
3491 enum ath10k_hw_rev hw_rev;
3492 struct ath10k_bus_params bus_params = {};
3493 bool pci_ps;
3494 int (*pci_soft_reset)(struct ath10k *ar);
3495 int (*pci_hard_reset)(struct ath10k *ar);
3496 u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
3497
3498 switch (pci_dev->device) {
3499 case QCA988X_2_0_DEVICE_ID_UBNT:
3500 case QCA988X_2_0_DEVICE_ID:
3501 hw_rev = ATH10K_HW_QCA988X;
3502 pci_ps = false;
3503 pci_soft_reset = ath10k_pci_warm_reset;
3504 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3505 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3506 break;
3507 case QCA9887_1_0_DEVICE_ID:
3508 hw_rev = ATH10K_HW_QCA9887;
3509 pci_ps = false;
3510 pci_soft_reset = ath10k_pci_warm_reset;
3511 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3512 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3513 break;
3514 case QCA6164_2_1_DEVICE_ID:
3515 case QCA6174_2_1_DEVICE_ID:
3516 hw_rev = ATH10K_HW_QCA6174;
3517 pci_ps = true;
3518 pci_soft_reset = ath10k_pci_warm_reset;
3519 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3520 targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3521 break;
3522 case QCA99X0_2_0_DEVICE_ID:
3523 hw_rev = ATH10K_HW_QCA99X0;
3524 pci_ps = false;
3525 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3526 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3527 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3528 break;
3529 case QCA9984_1_0_DEVICE_ID:
3530 hw_rev = ATH10K_HW_QCA9984;
3531 pci_ps = false;
3532 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3533 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3534 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3535 break;
3536 case QCA9888_2_0_DEVICE_ID:
3537 hw_rev = ATH10K_HW_QCA9888;
3538 pci_ps = false;
3539 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3540 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3541 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3542 break;
3543 case QCA9377_1_0_DEVICE_ID:
3544 hw_rev = ATH10K_HW_QCA9377;
3545 pci_ps = true;
3546 pci_soft_reset = ath10k_pci_warm_reset;
3547 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3548 targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3549 break;
3550 default:
3551 WARN_ON(1);
3552 return -ENOTSUPP;
3553 }
3554
3555 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3556 hw_rev, &ath10k_pci_hif_ops);
3557 if (!ar) {
3558 dev_err(&pdev->dev, "failed to allocate core\n");
3559 return -ENOMEM;
3560 }
3561
3562 ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3563 pdev->vendor, pdev->device,
3564 pdev->subsystem_vendor, pdev->subsystem_device);
3565
3566 ar_pci = ath10k_pci_priv(ar);
3567 ar_pci->pdev = pdev;
3568 ar_pci->dev = &pdev->dev;
3569 ar_pci->ar = ar;
3570 ar->dev_id = pci_dev->device;
3571 ar_pci->pci_ps = pci_ps;
3572 ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
3573 ar_pci->pci_soft_reset = pci_soft_reset;
3574 ar_pci->pci_hard_reset = pci_hard_reset;
3575 ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
3576 ar->ce_priv = &ar_pci->ce;
3577
3578 ar->id.vendor = pdev->vendor;
3579 ar->id.device = pdev->device;
3580 ar->id.subsystem_vendor = pdev->subsystem_vendor;
3581 ar->id.subsystem_device = pdev->subsystem_device;
3582
3583 timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
3584
3585 ret = ath10k_pci_setup_resource(ar);
3586 if (ret) {
3587 ath10k_err(ar, "failed to setup resource: %d\n", ret);
3588 goto err_core_destroy;
3589 }
3590
3591 ret = ath10k_pci_claim(ar);
3592 if (ret) {
3593 ath10k_err(ar, "failed to claim device: %d\n", ret);
3594 goto err_free_pipes;
3595 }
3596
3597 ret = ath10k_pci_force_wake(ar);
3598 if (ret) {
 ath10k_warn(ar, "failed to wake up device: %d\n", ret);
3600 goto err_sleep;
3601 }
3602
3603 ath10k_pci_ce_deinit(ar);
3604 ath10k_pci_irq_disable(ar);
3605
3606 ret = ath10k_pci_init_irq(ar);
3607 if (ret) {
3608 ath10k_err(ar, "failed to init irqs: %d\n", ret);
3609 goto err_sleep;
3610 }
3611
3612 ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3613 ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3614 ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3615
3616 ret = ath10k_pci_request_irq(ar);
3617 if (ret) {
3618 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3619 goto err_deinit_irq;
3620 }
3621
3622 ret = ath10k_pci_chip_reset(ar);
3623 if (ret) {
3624 ath10k_err(ar, "failed to reset chip: %d\n", ret);
3625 goto err_free_irq;
3626 }
3627
3628 bus_params.dev_type = ATH10K_DEV_TYPE_LL;
3629 bus_params.link_can_suspend = true;
3630 bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
 if (bus_params.chip_id == 0xffffffff) {
 ath10k_err(ar, "failed to get chip id\n");
 ret = -ENODEV;
 goto err_free_irq;
 }
3635
 if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
 pdev->device, bus_params.chip_id);
 ret = -ENODEV;
 goto err_free_irq;
 }
3641
3642 ret = ath10k_core_register(ar, &bus_params);
3643 if (ret) {
3644 ath10k_err(ar, "failed to register driver core: %d\n", ret);
3645 goto err_free_irq;
3646 }
3647
3648 return 0;
3649
3650err_free_irq:
3651 ath10k_pci_free_irq(ar);
3652 ath10k_pci_rx_retry_sync(ar);
3653
3654err_deinit_irq:
3655 ath10k_pci_deinit_irq(ar);
3656
3657err_sleep:
3658 ath10k_pci_sleep_sync(ar);
3659 ath10k_pci_release(ar);
3660
3661err_free_pipes:
3662 ath10k_pci_free_pipes(ar);
3663
3664err_core_destroy:
3665 ath10k_core_destroy(ar);
3666
3667 return ret;
3668}
3669
3670static void ath10k_pci_remove(struct pci_dev *pdev)
3671{
3672 struct ath10k *ar = pci_get_drvdata(pdev);
3673 struct ath10k_pci *ar_pci;
3674
3675 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3676
3677 if (!ar)
3678 return;
3679
3680 ar_pci = ath10k_pci_priv(ar);
3681
3682 if (!ar_pci)
3683 return;
3684
3685 ath10k_core_unregister(ar);
3686 ath10k_pci_free_irq(ar);
3687 ath10k_pci_deinit_irq(ar);
3688 ath10k_pci_release_resource(ar);
3689 ath10k_pci_sleep_sync(ar);
3690 ath10k_pci_release(ar);
3691 ath10k_core_destroy(ar);
3692}
3693
3694MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3695
3696static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3697{
3698 struct ath10k *ar = dev_get_drvdata(dev);
3699 int ret;
3700
3701 ret = ath10k_pci_suspend(ar);
3702 if (ret)
3703 ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3704
3705 return ret;
3706}
3707
3708static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3709{
3710 struct ath10k *ar = dev_get_drvdata(dev);
3711 int ret;
3712
3713 ret = ath10k_pci_resume(ar);
3714 if (ret)
3715 ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3716
3717 return ret;
3718}
3719
3720static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3721 ath10k_pci_pm_suspend,
3722 ath10k_pci_pm_resume);
3723
3724static struct pci_driver ath10k_pci_driver = {
3725 .name = "ath10k_pci",
3726 .id_table = ath10k_pci_id_table,
3727 .probe = ath10k_pci_probe,
3728 .remove = ath10k_pci_remove,
3729#ifdef CONFIG_PM
3730 .driver.pm = &ath10k_pci_pm_ops,
3731#endif
3732};
3733
3734static int __init ath10k_pci_init(void)
3735{
3736 int ret;
3737
3738 ret = pci_register_driver(&ath10k_pci_driver);
3739 if (ret)
3740 printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3741 ret);
3742
3743 ret = ath10k_ahb_init();
3744 if (ret)
3745 printk(KERN_ERR "ahb init failed: %d\n", ret);
3746
3747 return ret;
3748}
3749module_init(ath10k_pci_init);
3750
3751static void __exit ath10k_pci_exit(void)
3752{
3753 pci_unregister_driver(&ath10k_pci_driver);
3754 ath10k_ahb_exit();
3755}
3756
3757module_exit(ath10k_pci_exit);
3758
3759MODULE_AUTHOR("Qualcomm Atheros");
3760MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3761MODULE_LICENSE("Dual BSD/GPL");
3762
3763
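/* QCA988X 2.0 firmware files */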
3764MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3765MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3766MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3767MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3768MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3769MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3770
3771
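/* QCA9887 1.0 firmware files */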
3772MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3773MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3774MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3775
3776
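/* QCA6174 2.1 firmware files */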
3777MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3778MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3779MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3780MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3781
3782
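/* QCA6174 3.0 firmware files */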
3783MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3784MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3785MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3786MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3787MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3788
3789
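/* QCA9377 1.0 firmware files */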
3790MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3791MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3792MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
3793