#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

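/* how long to wait (in ms) for the target to initialise */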
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) },
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) },
	{0}
};

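/* QCA988X pre-2.0 chips are not listed below: they need workarounds that
 * ath10k does not implement, and such devices crash without them.
 */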
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

	{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },

	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_pci_htt_tx_cb,
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9: target autonomous */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

/* Target firmware's Copy Engine configuration; must stay in sync with the
 * host table above.
 */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7: used only by the host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8: target->host pktlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9: target autonomous */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 and CE11 need no target-side configuration: they are not
	 * actively used by the target.
	 */
};

/* Map from service/endpoint to Copy Engine.
 * This table is derived from the CE configuration above and is passed
 * to the target at startup for use by the firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

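/* The SoC counts as awake once the RTC state machine reports RTC_STATE_V_ON. */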
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

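/* Poll for the wakeup to take effect: the per-iteration delay starts at 5 us
 * and grows to 50 us, bounded overall by PCIE_WAKE_TIMEOUT.
 */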
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms, which is unusually long; otherwise it works normally\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads, consult the cached power state and only
	 * touch the hardware when the device is not already awake.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

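/* MMIO accessors below bounds-check every access against the mapped BAR
 * length, wake the target before touching registers and drop the wake
 * reference afterwards.
 */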
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}

inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: the INTR_CLR register has to be set after INTR_ENABLE
	 * is set to 0; otherwise the interrupt cannot really be cleared.
	 */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
		return "msi";

	return "legacy";
}

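/* Allocate, DMA-map and post a single receive buffer to the pipe's copy
 * engine ring; the mapping covers the skb's full tailroom.
 */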
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);

	while (num >= 0) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
		num--;
	}
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}

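/* Helpers translating a target CPU virtual address into the copy engine
 * address space; the register layout used for the translation differs per
 * chip family, so the proper variant is selected via targ_cpu_to_ce_addr.
 */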
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
	       & 0x7ff) << 21;
	val |= 0x100000 | region;
	return val;
}

static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
	val |= 0x100000 | region;
	return val;
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
		return -ENOTSUPP;

	return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}

/* Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at a time.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ce->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
							alloc_nbytes,
							&ce_data_base,
							GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	remaining_bytes = nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer.
		 *
		 * The address supplied by the caller is in the Target CPU
		 * virtual address space; to use it with the diagnostic CE it
		 * must first be converted to the CE address space.
		 */
		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address,
					    nbytes, 0, 0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
		       != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		memcpy(data, data_buf, nbytes);

		address += nbytes;
		data += nbytes;
	}

done:

	if (data_buf)
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
			      const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ce->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
		       != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

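/* Called by the CE layer when a send on an HTC pipe completes: completed
 * skbs are collected first and handed to HTC outside the CE loop.
 */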
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Hence CE5 access is serialized.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		/* let the device gain the buffer again */
		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses a separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

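/* Queue a scatter-gather list on a TX pipe: all items but the last carry
 * CE_SEND_FLAG_GATHER so the copy engine treats them as one transfer, and
 * already-queued items are reverted if a later one fails.
 */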
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items - 1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}

int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);
	ath10k_ce_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

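/* Service completed sends on a pipe. Unless forced, the (relatively
 * expensive) CE register poll is skipped while plenty of source ring
 * entries remain free; see the checks below.
 */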
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;

		/* Decide whether to actually poll for completions or to just
		 * wait for a later chance: checking involves reading a CE
		 * register, which is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/* If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	del_timer_sync(&ar_pci->rx_post_retry);
}

int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
				     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}

void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to mask irq/MSI.
		 */
		break;
	case ATH10K_HW_WCN3990:
		break;
	}
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to unmask irq/MSI.
		 */
		break;
	case ATH10K_HW_WCN3990:
		break;
	}
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	synchronize_irq(ar_pci->pdev->irq);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	napi_enable(&ar->napi);

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}

/*
 * Clean up residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

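/* Cancel the RX-post retry timer and reclaim every buffer still sitting on
 * the copy engine rings; used on the stop and shutdown paths.
 */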
void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless of how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt, reset
	 * it before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* the exchange completed in time */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		/* copy at most the caller-provided buffer size */
		memcpy(resp, tresp, *resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

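/* BMI completion handlers: ath10k_pci_bmi_wait() polls these directly until
 * the exchange finishes or BMI_COMMUNICATION_TIMEOUT_HZ expires.
 */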
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	unsigned int nbytes;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
					  &nbytes))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}

static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	unsigned long started = jiffies;
	unsigned long dur;
	int ret;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
			ret = 0;
			goto out;
		}

		schedule();
	}

	ret = -ETIMEDOUT;

out:
	dur = jiffies - started;
	if (dur > HZ)
		ath10k_dbg(ar, ATH10K_DBG_BMI,
			   "bmi cmd took %lu jiffies hz %d ret %d\n",
			   dur, HZ, ret);
	return ret;
}

/* Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}

static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
	case QCA9888_2_0_DEVICE_ID:
	case QCA9984_1_0_DEVICE_ID:
	case QCA9887_1_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 4;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}

static int ath10k_bus_get_num_banks(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->get_num_banks(ar);
}

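/* Push the copy engine pipe configuration, the service-to-pipe map and the
 * early-allocation settings to the target through the diagnostic window.
 */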
int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for another feature.
	 */

	/* Override Host's Copy Engine 5 configuration */
	attr = &host_ce_config_wlan[5];
	attr->src_sz_max = 0;
	attr->dest_nentries = 0;

	/* Override Target firmware's Copy Engine configuration */
	config = &target_ce_config_wlan[5];
	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
	config->nbytes_max = __cpu_to_le32(2048);

	/* Map from service/endpoint to Copy Engine */
	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
}

int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}

void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

int ath10k_pci_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}

static bool ath10k_pci_has_device_gone(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	return (val == 0xffffffff);
}

/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}

static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}

static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}

static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

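	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse pci-e controller to
	 * the point of bringing host system to a complete stop (i.e. hang).
	 */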
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}

static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);
	return ath10k_pci_qca99x0_chip_reset(ar);
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->pci_soft_reset)
		return -ENOTSUPP;

	return ar_pci->pci_soft_reset(ar);
}

static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

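	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device in all cases.
	 *
	 * Warm reset doesn't always work on the first try so attempt it a few
	 * times before giving up.
	 */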
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

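		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */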
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

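	/* FIXME: QCA6174 requires cold + warm reset to work. */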
	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON(!ar_pci->pci_hard_reset))
		return -ENOTSUPP;

	return ar_pci->pci_hard_reset(ar);
}

static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

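	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */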
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}

void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

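	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */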
}

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
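	/* Nothing to do; the important stuff is in the driver suspend. */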
	return 0;
}

static int ath10k_pci_suspend(struct ath10k *ar)
{
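	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */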
	ath10k_pci_sleep_sync(ar);

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
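	/* Nothing to do; the important stuff is in the driver resume. */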
	return 0;
}

static int ath10k_pci_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up target: %d\n", ret);
		return ret;
	}

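	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */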
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return ret;
}

static bool ath10k_pci_validate_cal(void *data, size_t size)
{
	__le16 *cal_words = data;
	u16 checksum = 0;
	size_t i;

	if (size % 2 != 0)
		return false;

	for (i = 0; i < size / 2; i++)
		checksum ^= le16_to_cpu(cal_words[i]);

	return checksum == 0xffff;
}

static void ath10k_pci_enable_eeprom(struct ath10k *ar)
{
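	/* Enable SI clock */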
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);

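	/* Configure GPIOs for I2C operation */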
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
			      GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS +
			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);

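	/* Configure SI module for I2C access to the EEPROM */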
	ath10k_pci_write32(ar,
			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
			   SM(1, SI_CONFIG_ERR_INT) |
			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
			   SM(1, SI_CONFIG_I2C) |
			   SM(1, SI_CONFIG_POS_SAMPLE) |
			   SM(1, SI_CONFIG_INACTIVE_DATA) |
			   SM(1, SI_CONFIG_INACTIVE_CLK) |
			   SM(8, SI_CONFIG_DIVIDER));
}

static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
{
	u32 reg;
	int wait_limit;

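	/* set device select byte and for the read operation */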
	reg = QCA9887_EEPROM_SELECT_READ |
	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);

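	/* write transmit data, transfer length, and START bit */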
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
			   SM(4, SI_CS_TX_CNT));

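	/* wait max 1 sec */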
	wait_limit = 100000;

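	/* poll until the transaction completes */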
	do {
		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
		if (MS(reg, SI_CS_DONE_INT))
			break;

		wait_limit--;
		udelay(10);
	} while (wait_limit > 0);

	if (!MS(reg, SI_CS_DONE_INT)) {
		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
			   addr);
		return -ETIMEDOUT;
	}

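	/* clear SI_CS_DONE_INT */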
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);

	if (MS(reg, SI_CS_DONE_ERR)) {
		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
		return -EIO;
	}

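	/* extract receive data */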
	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
	*out = reg;

	return 0;
}

static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
					   size_t *data_len)
{
	u8 *caldata = NULL;
	size_t calsize, i;
	int ret;

	if (!QCA_REV_9887(ar))
		return -EOPNOTSUPP;

	calsize = ar->hw_params.cal_data_len;
	caldata = kmalloc(calsize, GFP_KERNEL);
	if (!caldata)
		return -ENOMEM;

	ath10k_pci_enable_eeprom(ar);

	for (i = 0; i < calsize; i++) {
		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
		if (ret)
			goto err_free;
	}

	if (!ath10k_pci_validate_cal(caldata, calsize))
		goto err_free;

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return -EINVAL;
}

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.diag_read		= ath10k_pci_hif_diag_read,
	.diag_write		= ath10k_pci_diag_write_mem,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
	.read32			= ath10k_pci_read32,
	.write32		= ath10k_pci_write32,
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
	.fetch_cal_eeprom	= ath10k_pci_hif_fetch_cal_eeprom,
};

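/* Top-level interrupt handler for all PCI interrupts from the target.
 * Serves both legacy and MSI interrupt modes; the actual processing is
 * deferred to the NAPI poll handler.
 */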
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
	    !ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}

static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete_done(ctx, done);

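		/* In case of MSI, it is possible that interrupts are received
		 * while NAPI poll is in progress. Pending interrupts that
		 * arrive after all copy engine pipes have been processed by
		 * the poll would otherwise never be handled again, so before
		 * re-enabling interrupts check for any pending interrupts and
		 * service them immediately.
		 */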
		if (ath10k_ce_interrupt_summary(ar)) {
			napi_reschedule(ctx);
			goto out;
		}
		ath10k_pci_enable_legacy_irq(ar);
		ath10k_pci_irq_msi_fw_unmask(ar);
	}

out:
	return done;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		return ath10k_pci_request_irq_legacy(ar);
	case ATH10K_PCI_IRQ_MSI:
		return ath10k_pci_request_irq_msi(ar);
	default:
		return -EINVAL;
	}
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	free_irq(ar_pci->pdev->irq, ar);
}

void ath10k_pci_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
		       ATH10K_NAPI_BUDGET);
}

static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_napi(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

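	/* Try MSI */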
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

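		/* fall through to legacy irq */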
	}

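	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */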
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}

static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		ath10k_pci_deinit_irq_legacy(ar);
		break;
	default:
		pci_disable_msi(ar_pci->pdev);
		break;
	}

	return 0;
}

int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

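		/* target should never return this */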
		if (val == 0xffffffff)
			continue;

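		/* the device has crashed so don't bother trying anymore */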
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
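			/* Fix potential race by repeating CORE_BASE writes */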
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}

static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

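	/* Put Target, including PCIe, into RESET. */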
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

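	/* After writing into SOC_GLOBAL_RESET to put device into
	 * reset and pulling out of reset pcie may not be stable
	 * for any immediate pcie register access and cause bus error,
	 * add delay before any pcie access request to avoid this issue.
	 */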
	msleep(20);

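	/* Pull Target, including PCIe, out of RESET. */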
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}

static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

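	/* Target expects 32 bit DMA. Enforce it. */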
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

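	/* Arrange for access to Target SoC registers. */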
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}

static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
	const struct ath10k_pci_supp_chip *supp_chip;
	int i;
	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
		supp_chip = &ath10k_pci_supp_chips[i];

		if (supp_chip->dev_id == dev_id &&
		    supp_chip->rev_id == rev_id)
			return true;
	}

	return false;
}

int ath10k_pci_setup_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_init(&ce->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);

	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);

	if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
		ath10k_pci_override_ce_config(ar);

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

void ath10k_pci_release_resource(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	netif_napi_del(&ar->napi);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
}

static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32		= ath10k_bus_pci_read32,
	.write32	= ath10k_bus_pci_write32,
	.get_num_banks	= ath10k_pci_get_num_banks,
};

static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id;
	bool pci_ps;
	int (*pci_soft_reset)(struct ath10k *ar);
	int (*pci_hard_reset)(struct ath10k *ar);
	u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA9887_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9887;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9984_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9984;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9888_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9888;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9377_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9377;
		pci_ps = true;
		pci_soft_reset = NULL;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;
	ar->dev_id = pci_dev->device;
	ar_pci->pci_ps = pci_ps;
	ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
	ar_pci->pci_soft_reset = pci_soft_reset;
	ar_pci->pci_hard_reset = pci_hard_reset;
	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
	ar->ce_priv = &ar_pci->ce;

	ar->id.vendor = pdev->vendor;
	ar->id.device = pdev->device;
	ar->id.subsystem_vendor = pdev->subsystem_vendor;
	ar->id.subsystem_device = pdev->subsystem_device;

	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
		    (unsigned long)ar);

	ret = ath10k_pci_setup_resource(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_free_pipes;
	}

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake up device: %d\n", ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_sleep;
	}

	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
			   pdev->device, chip_id);
		ret = -ENODEV;
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_rx_retry_sync(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_release_resource(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_suspend(ar);
	if (ret)
		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);

	return ret;
}

static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_resume(ar);
	if (ret)
		ath10k_warn(ar, "failed to resume hif: %d\n", ret);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
			 ath10k_pci_pm_suspend,
			 ath10k_pci_pm_resume);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
#ifdef CONFIG_PM
	.driver.pm = &ath10k_pci_pm_ops,
#endif
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	ret = ath10k_ahb_init();
	if (ret)
		printk(KERN_ERR "ahb init failed: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");

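/* QCA988x 2.0 firmware files */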
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

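/* QCA9887 1.0 firmware files */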
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

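/* QCA6174 2.1 firmware files */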
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

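/* QCA6174 3.0 firmware files */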
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

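/* QCA9377 1.0 firmware files */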
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);