#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"
#include "coredump.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
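/* PCI device IDs claimed by this driver. The Ubiquiti entry appears to be
 * a vendor-branded QCA988X part under Ubiquiti's own vendor/device ID.
 */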
static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) },
	{0}
};
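/* (device id, chip revision) pairs this driver knows how to drive; chip
 * revisions outside this list are presumably flagged at probe time.
 */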
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

	{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },

	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
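/* Host-side Copy Engine configuration. Entry N configures CE N; the roles
 * below follow from the callbacks and sizes wired up in each entry:
 *   CE0: host->target HTC control and raw streams
 *   CE1: target->host HTT + HTC control
 *   CE2: target->host WMI
 *   CE3: host->target WMI
 *   CE4: host->target HTT (interrupts disabled; serviced by polling)
 *   CE5: target->host HTT
 *   CE6: unused on the host side (target autonomous)
 *   CE7: diagnostic window (host<->target)
 *   CE8: target->host pktlog
 *   CE9-11: unused on the host side (target autonomous)
 */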
static struct ce_attr host_ce_config_wlan[] = {
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_htc_rx_cb,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_htc_rx_cb,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htt_tx_cb,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_rx_cb,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
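/* Target firmware's Copy Engine configuration. This table is written into
 * target memory through the diagnostic window in ath10k_pci_init_config(),
 * hence the explicit little-endian fields.
 */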
static struct ce_pipe_config target_ce_config_wlan[] = {
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},
};
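/* Map from an HTC service ID to a (direction, CE pipe) pair. This table is
 * also downloaded to the target in ath10k_pci_init_config().
 */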
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(5),
	},
	{
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
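/* The device keeps its PCIe core powered down unless explicitly woken. The
 * helpers below implement a refcounted wake/sleep scheme: ath10k_pci_wake()
 * wakes the device on the first reference, ath10k_pci_sleep() arms a
 * grace-period timer, and ath10k_pci_ps_timer() finally lets the device doze
 * once the refcount has dropped to zero.
 */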
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

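	/* Only the first reference actually pokes the wake register and
	 * waits for the target; nested callers just bump the refcount.
	 */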
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(struct timer_list *t)
{
	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
	struct ath10k *ar = ar_pci->ar;
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}

inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
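	/* IMPORTANT: the enable register must be written before the clear
	 * register, otherwise the pending interrupt cannot really be cleared.
	 */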
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

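	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */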
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

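	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */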
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
		return "msi";

	return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);

	while (num >= 0) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
		num--;
	}
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
	struct ath10k *ar = ar_pci->ar;

	ath10k_pci_rx_post(ar);
}
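/* Translate an address in the target CPU's address space into one the Copy
 * Engine can DMA from/to. The window layout differs per chip family, hence
 * the per-hw-rev helpers below.
 */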
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
	       & 0x7ff) << 21;
	val |= 0x100000 | region;
	return val;
}

static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
	val |= 0x100000 | region;
	return val;
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
		return -ENOTSUPP;

	return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
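/* Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */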
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ce->ce_lock);

	ce_diag = ar_pci->ce_diag;

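	/* Allocate a temporary bounce buffer to hold the data read from the
	 * Target; GFP_ATOMIC because we are under ce_lock here.
	 */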
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
							alloc_nbytes,
							&ce_data_base,
							GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	remaining_bytes = nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
		if (ret != 0)
			goto done;

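		/* Request CE to send from Target(!) address to Host buffer */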
		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
		       != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		memcpy(data, data_buf, nbytes);

		address += nbytes;
		data += nbytes;
	}

done:
	if (data_buf)
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)	\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
			      const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ce->ce_lock);

	ce_diag = ar_pci->ce_diag;

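	/* Allocate a temporary bounce buffer to hold the caller's data to be
	 * DMA'ed to the Target.
	 */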
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	memcpy(data_buf, data, orig_nbytes);

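	/* The address supplied by the caller is in the Target CPU's virtual
	 * address space; convert it into an address the Copy Engine can
	 * actually reach.
	 */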
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

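		/* Set up to receive directly into Target(!) address */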
		ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
		if (ret != 0)
			goto done;

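		/* Request CE to send the caller-supplied data that was copied
		 * to the bounce buffer to the Target(!) address.
		 */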
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
		       != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
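/* Called by the lower (CE) layer when a send to the Target completes. */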
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
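		/* no need to call tx completion for NULL pointers */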
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

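	/* The HTT Rx pipe is serviced from a single context, so the nolock
	 * CE variants are used here; buffers are synced and recycled in place
	 * below instead of being unmapped and re-posted.
	 */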
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
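	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed to prevent deadlock.
	 */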
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}

static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
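	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed to prevent deadlock.
	 */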
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

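	/* `i` now indexes the last item of the scatter list; send it without
	 * the GATHER flag so the CE reports a completion for the whole batch.
	 */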
	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}

int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static int ath10k_pci_dump_memory_section(struct ath10k *ar,
					  const struct ath10k_mem_region *mem_region,
					  u8 *buf, size_t buf_len)
{
	const struct ath10k_mem_section *cur_section, *next_section;
	unsigned int count, section_size, skip_size;
	int ret, i, j;

	if (!mem_region || !buf)
		return 0;

	cur_section = &mem_region->section_table.sections[0];

	if (mem_region->start > cur_section->start) {
		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
			    mem_region->start, cur_section->start);
		return 0;
	}

	skip_size = cur_section->start - mem_region->start;

	for (i = 0; i < skip_size; i++) {
		*buf = ATH10K_MAGIC_NOT_COPIED;
		buf++;
	}

	count = 0;

	for (i = 0; cur_section != NULL; i++) {
		section_size = cur_section->end - cur_section->start;

		if (section_size <= 0) {
			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
				    cur_section->start,
				    cur_section->end);
			break;
		}

		if ((i + 1) == mem_region->section_table.size) {
			next_section = NULL;
			skip_size = 0;
		} else {
			next_section = cur_section + 1;

			if (cur_section->end > next_section->start) {
				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
					    next_section->start,
					    cur_section->end);
				break;
			}

			skip_size = next_section->start - cur_section->end;
		}

		if (buf_len < (skip_size + section_size)) {
			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
			break;
		}

		buf_len -= skip_size + section_size;

		ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
					       buf, section_size);
		if (ret) {
			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
				    cur_section->start, ret);
			break;
		}

		buf += section_size;
		count += section_size;

		for (j = 0; j < skip_size; j++) {
			*buf = ATH10K_MAGIC_NOT_COPIED;
			buf++;
		}

		count += skip_size;

		if (!next_section)
			break;

		cur_section = next_section;
	}

	return count;
}
static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
{
	u32 val;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   FW_RAM_CONFIG_ADDRESS, config);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				FW_RAM_CONFIG_ADDRESS);
	if (val != config) {
		ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
			    val, config);
		return -EIO;
	}

	return 0;
}
static void ath10k_pci_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count, shift;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	lockdep_assert_held(&ar->data_lock);

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;

	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		if (current_region->len > buf_len) {
			ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

		if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
		    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
			shift = current_region->start >> 20;

			ret = ath10k_pci_set_ram_config(ar, shift);
			if (ret) {
				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
					    current_region->name, ret);
				break;
			}
		}

		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		if (current_region->section_table.size > 0) {
			count = ath10k_pci_dump_memory_section(ar,
							       current_region,
							       buf,
							       current_region->len);
		} else {
			ret = ath10k_pci_diag_read_mem(ar,
						       current_region->start,
						       buf,
						       current_region->len);
			if (ret) {
				ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
					    current_region->name, ret);
				break;
			}

			count = current_region->len;
		}

		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);

		if (count == 0)
			break;

		buf += count;
		buf_len -= count;

		current_region++;
	}
}
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);
	ath10k_ce_dump_registers(ar, crash_data);
	ath10k_pci_dump_memory(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;

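		/* Decide whether to actually poll for completions, or just
		 * wait for a later chance. If there seem to be plenty of
		 * resources left, then just wait, since checking involves
		 * reading a CE register, which is a relatively expensive
		 * operation.
		 */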
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

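		/* If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */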
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	del_timer_sync(&ar_pci->rx_post_retry);
}

int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
				     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		break;
	case ATH10K_HW_WCN3990:
		break;
	}
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		break;
	case ATH10K_HW_WCN3990:
		break;
	}
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	synchronize_irq(ar_pci->pdev->irq);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	napi_enable(&ar->napi);

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}
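/* Cleanup residual buffers for device shutdown:
 *   buffers that were enqueued for receive
 *   buffers that were to be sent
 * Note: buffers that had completed but were not yet processed are on a
 * completion queue and are handled when the completion thread shuts down.
 */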
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

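	/* Most likely the device has an HTT Rx ring configured. The only way
	 * to prevent the device from accessing (and possibly corrupting)
	 * host memory is to reset the chip now.
	 *
	 * There is also no known way of masking MSI interrupts on the
	 * device: regardless of how many MSI interrupts are assigned, the
	 * first one is always used for firmware indications (crashes) and
	 * cannot be masked, so reset the chip before proceeding with
	 * cleanup.
	 */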
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
	if (ret) {
		dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		ret = 0;
	}

err_resp:
	if (resp) {
		dma_addr_t unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	unsigned int nbytes;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
					  &nbytes))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}

static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	unsigned long started = jiffies;
	unsigned long dur;
	int ret;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
			ret = 0;
			goto out;
		}

		schedule();
	}

	ret = -ETIMEDOUT;

out:
	dur = jiffies - started;
	if (dur > HZ)
		ath10k_dbg(ar, ATH10K_DBG_BMI,
			   "bmi cmd took %lu jiffies hz %d ret %d\n",
			   dur, HZ, ret);
	return ret;
}
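/* Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */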
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}

static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
	case QCA9888_2_0_DEVICE_ID:
	case QCA9984_1_0_DEVICE_ID:
	case QCA9887_1_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 4;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}

static int ath10k_bus_get_num_banks(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->get_num_banks(ar);
}

int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

2338 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2339
2340 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2341 if (ret != 0) {
2342 ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2343 return ret;
2344 }
2345
2346
2347 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2348 HI_EARLY_ALLOC_MAGIC_MASK);
2349 ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2350 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2351 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2352
2353 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2354 if (ret != 0) {
2355 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2356 return ret;
2357 }
2358
2359
2360 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2361
2362 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2363 if (ret != 0) {
2364 ath10k_err(ar, "Failed to get option val: %d\n", ret);
2365 return ret;
2366 }
2367
2368 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2369
2370 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2371 if (ret != 0) {
2372 ath10k_err(ar, "Failed to set option val: %d\n", ret);
2373 return ret;
2374 }
2375
2376 return 0;
2377}
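
/*
 * A sketch of how the host-interest addressing above is assumed to
 * work: HI_ITEM(field) expands to offsetof(struct host_interest,
 * field), and host_interest_item_address() adds that offset to a fixed
 * per-chip host-interest base in target memory, roughly:
 *
 *	addr = HOST_INTEREST_BASE + offsetof(struct host_interest, field);
 *
 * (cf. QCA988X_HOST_INTEREST_ADDRESS used for the diag poke in
 * ath10k_pci_qca988x_chip_reset() below). The exact definitions live in
 * targaddrs.h and the bus headers, so treat the line above as
 * illustrative rather than the literal macro.
 */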

static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 and QCA9377 the Copy Engine 5 configuration is
	 * overridden, since firmware uses CE5 for another feature on
	 * these chips.
	 */

	/* Override Host's Copy Engine 5 configuration */
	attr = &host_ce_config_wlan[5];
	attr->src_sz_max = 0;
	attr->dest_nentries = 0;

	/* Override Target firmware's Copy Engine configuration */
	config = &target_ce_config_wlan[5];
	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
	config->nbytes_max = __cpu_to_le32(2048);

	/* Map from service/endpoint to Copy Engine */
	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
}
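
/*
 * ath10k_pci_setup_resource() applies the override above only for
 * QCA6174 and QCA9377: host-side CE5 is emptied and one service-map
 * entry is redirected to CE1. The specific index (15) into
 * target_service_to_ce_map_wlan is inherited from the original table
 * layout rather than derived here.
 */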

int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}

void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

int ath10k_pci_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}

static bool ath10k_pci_has_device_gone(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	return (val == 0xffffffff);
}

/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}
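
/*
 * The read-backs surrounding each write above flush the posted register
 * writes before the delays; the SI0 reset bit is held asserted for
 * ~10 ms and then deasserted for another ~10 ms so the line settles.
 * The durations are inherited from the original sequence, not derived
 * from a datasheet here.
 */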

static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}

static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}

static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if
	 * it were to access copy engine while host performs copy engine
	 * reset then it is possible the fw will hang. Quiesce the device
	 * first, then reset.
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}
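
/*
 * Note on the double reset/init pass above: the first
 * warm_reset_cpu/init_pipes/wait quiesces whatever firmware state was
 * left behind, and the second pass (after clearing the LF timer and
 * resetting the copy engines) leaves the target clean for a fresh
 * firmware download. The intermediate wait's return value is
 * deliberately ignored; only the final wait decides success.
 */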

static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);
	return ath10k_pci_qca99x0_chip_reset(ar);
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->pci_soft_reset)
		return -EOPNOTSUPP;

	return ar_pci->pci_soft_reset(ar);
}

static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold
	 * reset. It is thus preferred to use warm reset which is safer but
	 * may not be able to recover the device in all cases.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of
		 * these cases the device is in such a state that a cold
		 * reset may lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}
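
/*
 * Reset strategy for QCA988x in short: attempt up to
 * ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS warm resets, verifying each by
 * re-initializing the copy engines and reading one host-interest word
 * through the diagnostic window, and only fall back to the riskier cold
 * reset if every attempt fails and the reset_mode module parameter
 * allows it.
 */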

static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */
	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON(!ar_pci->pci_hard_reset))
		return -EOPNOTSUPP;

	return ar_pci->pci_hard_reset(ar);
}

static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}

void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no need to reset during power
	 * down.
	 */
}

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* Nothing to do; the important stuff is in the driver suspend. */
	return 0;
}

static int ath10k_pci_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	/* Nothing to do; the important stuff is in the driver resume. */
	return 0;
}

static int ath10k_pci_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return ret;
}

static bool ath10k_pci_validate_cal(void *data, size_t size)
{
	__le16 *cal_words = data;
	u16 checksum = 0;
	size_t i;

	if (size % 2 != 0)
		return false;

	for (i = 0; i < size / 2; i++)
		checksum ^= le16_to_cpu(cal_words[i]);

	return checksum == 0xffff;
}
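
/*
 * Worked example for the check above: a two-word image { 0x1234, 0xedcb }
 * validates since 0x1234 ^ 0xedcb == 0xffff, while any image whose
 * 16-bit words do not XOR to 0xffff (or that has an odd byte count) is
 * rejected.
 */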

static void ath10k_pci_enable_eeprom(struct ath10k *ar)
{
	/* Enable SI clock */
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);

	/* Configure GPIOs for I2C operation */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
			      GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS +
			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);

	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
	ath10k_pci_write32(ar,
			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
			   SM(1, SI_CONFIG_ERR_INT) |
			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
			   SM(1, SI_CONFIG_I2C) |
			   SM(1, SI_CONFIG_POS_SAMPLE) |
			   SM(1, SI_CONFIG_INACTIVE_DATA) |
			   SM(1, SI_CONFIG_INACTIVE_CLK) |
			   SM(8, SI_CONFIG_DIVIDER));
}
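
/*
 * SM(value, FIELD) above is the driver's shift-and-mask helper, roughly
 * ((value) << FIELD_LSB) & FIELD_MASK, with MS() as its inverse. The
 * divider value of 8 selects the serial-interface clock rate quoted in
 * the preceding comment; the 110 MHz source and effective /512 ratio
 * are carried over from the original annotation rather than re-derived
 * here.
 */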

static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
{
	u32 reg;
	int wait_limit;

	/* set device select byte and for the read operation */
	reg = QCA9887_EEPROM_SELECT_READ |
	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);

	/* write transmit data, transfer length, and START bit */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
			   SM(4, SI_CS_TX_CNT));

	/* wait max 1 sec (100000 iterations x 10 us) */
	wait_limit = 100000;

	/* check if transmission is done */
	do {
		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
		if (MS(reg, SI_CS_DONE_INT))
			break;

		wait_limit--;
		udelay(10);
	} while (wait_limit > 0);

	if (!MS(reg, SI_CS_DONE_INT)) {
		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
			   addr);
		return -ETIMEDOUT;
	}

	/* clear SI_CS_DONE_INT */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);

	if (MS(reg, SI_CS_DONE_ERR)) {
		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
		return -EIO;
	}

	/* extract receive data */
	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
	*out = reg;

	return 0;
}

static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
					   size_t *data_len)
{
	u8 *caldata = NULL;
	size_t calsize, i;
	int ret;

	if (!QCA_REV_9887(ar))
		return -EOPNOTSUPP;

	calsize = ar->hw_params.cal_data_len;
	caldata = kmalloc(calsize, GFP_KERNEL);
	if (!caldata)
		return -ENOMEM;

	ath10k_pci_enable_eeprom(ar);

	for (i = 0; i < calsize; i++) {
		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
		if (ret)
			goto err_free;
	}

	if (!ath10k_pci_validate_cal(caldata, calsize))
		goto err_free;

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return -EINVAL;
}
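
/*
 * On success the caller owns the kmalloc'ed buffer handed back via
 * *data and is expected to kfree() it after consuming the calibration
 * data. Note that a failed EEPROM read is folded into -EINVAL rather
 * than propagating the underlying error code; that quirk is kept as-is.
 */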

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg = ath10k_pci_hif_tx_sg,
	.diag_read = ath10k_pci_hif_diag_read,
	.diag_write = ath10k_pci_diag_write_mem,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
	.read32 = ath10k_pci_read32,
	.write32 = ath10k_pci_write32,
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
	.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * Serves both MSI and legacy delivery; in legacy mode the shared line
 * must first be checked for a pending interrupt from this device
 * before the handler claims it.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
	    !ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}

static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete_done(ctx, done);

		/* In case of MSI, it is possible that interrupts are received
		 * while NAPI poll is in progress. So pending interrupts that
		 * are received after processing all copy engine pipes by NAPI
		 * poll will not be handled again. This is causing failure to
		 * complete boot sequence in x86 platform. So before enabling
		 * interrupts it is safer to check for pending interrupts for
		 * immediate servicing.
		 */
		if (ath10k_ce_interrupt_summary(ar)) {
			napi_reschedule(ctx);
			goto out;
		}
		ath10k_pci_enable_legacy_irq(ar);
		ath10k_pci_irq_msi_fw_unmask(ar);
	}

out:
	return done;
}
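
/*
 * NAPI contract reminder for the poll function above: returning a value
 * equal to budget keeps the instance scheduled with interrupts still
 * masked, while napi_complete_done(ctx, done) with done < budget ends
 * the poll and marks the point after which interrupts may safely be
 * re-enabled, hence the unmask ordering above.
 */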

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		return ath10k_pci_request_irq_legacy(ar);
	case ATH10K_PCI_IRQ_MSI:
		return ath10k_pci_request_irq_msi(ar);
	default:
		return -EINVAL;
	}
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	free_irq(ar_pci->pdev->irq, ar);
}

void ath10k_pci_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
		       ATH10K_NAPI_BUDGET);
}

static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_napi(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* MSI failed, fall back to legacy below */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}

static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		ath10k_pci_deinit_irq_legacy(ar);
		break;
	default:
		pci_disable_msi(ar_pci->pdev);
		break;
	}

	return 0;
}

int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}
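
/*
 * Polling details: the loop above gives the target up to
 * ATH10K_PCI_TARGET_WAIT milliseconds, re-reading the firmware
 * indicator register every 10 ms. A persistent 0xffffffff readback
 * means the device has dropped off the bus, FW_IND_EVENT_PENDING means
 * firmware crashed mid-boot, and only FW_IND_INITIALIZED counts as
 * success.
 */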

static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	/* After writing into SOC_GLOBAL_RESET to put device into
	 * reset and pulling out of reset pcie may not be stable
	 * for any immediate pcie register access and cause bus error,
	 * add delay before any pcie access request to fix this issue.
	 */
	msleep(20);

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}

static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}
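
/*
 * A note on the DMA mask setup above: newer kernels would typically
 * express the same constraint with a single
 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); the split
 * pci_set_dma_mask()/pci_set_consistent_dma_mask() pair is kept to
 * match the PCI API this file was written against.
 */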

static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
	const struct ath10k_pci_supp_chip *supp_chip;
	int i;
	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
		supp_chip = &ath10k_pci_supp_chips[i];

		if (supp_chip->dev_id == dev_id &&
		    supp_chip->rev_id == rev_id)
			return true;
	}

	return false;
}

int ath10k_pci_setup_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_init(&ce->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);

	timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);

	if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
		ath10k_pci_override_ce_config(ar);

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

void ath10k_pci_release_resource(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	netif_napi_del(&ar->napi);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
}

static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32 = ath10k_bus_pci_read32,
	.write32 = ath10k_bus_pci_write32,
	.get_num_banks = ath10k_pci_get_num_banks,
};

static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id;
	bool pci_ps;
	int (*pci_soft_reset)(struct ath10k *ar);
	int (*pci_hard_reset)(struct ath10k *ar);
	u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA9887_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9887;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9984_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9984;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9888_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9888;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9377_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9377;
		pci_ps = true;
		pci_soft_reset = NULL;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;
	ar->dev_id = pci_dev->device;
	ar_pci->pci_ps = pci_ps;
	ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
	ar_pci->pci_soft_reset = pci_soft_reset;
	ar_pci->pci_hard_reset = pci_hard_reset;
	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
	ar->ce_priv = &ar_pci->ce;

	ar->id.vendor = pdev->vendor;
	ar->id.device = pdev->device;
	ar->id.subsystem_vendor = pdev->subsystem_vendor;
	ar->id.subsystem_device = pdev->subsystem_device;

	timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);

	ret = ath10k_pci_setup_resource(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_free_pipes;
	}

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake up device: %d\n", ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_sleep;
	}

	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
			   pdev->device, chip_id);
		ret = -ENODEV;
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_rx_retry_sync(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	if (!ar)
		return;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_release_resource(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_suspend(ar);
	if (ret)
		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);

	return ret;
}

static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_resume(ar);
	if (ret)
		ath10k_warn(ar, "failed to resume hif: %d\n", ret);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
			 ath10k_pci_pm_suspend,
			 ath10k_pci_pm_resume);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
#ifdef CONFIG_PM
	.driver.pm = &ath10k_pci_pm_ops,
#endif
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret) {
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);
		return ret;
	}

	ret = ath10k_ahb_init();
	if (ret) {
		printk(KERN_ERR "ahb init failed: %d\n", ret);
		pci_unregister_driver(&ath10k_pci_driver);
	}

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.x firmware files (served from the 3.0 directory) */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
3723