#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include <linux/bitfield.h>
#include "core.h"
#include "bmi.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"

/* inlined helper functions */

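/* Pad a transfer length up to a whole number of SDIO blocks. As an
 * example (assuming the default 256-byte mbox block size, i.e. a
 * block_mask of 0xff), a 261-byte HTC message would be padded up to
 * 512 bytes so that every mbox transfer is block aligned.
 */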
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
						   size_t len)
{
	return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
}

static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
{
	return (enum ath10k_htc_ep_id)pipe_id;
}

static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
{
	dev_kfree_skb(pkt->skb);
	pkt->skb = NULL;
	pkt->alloc_len = 0;
	pkt->act_len = 0;
	pkt->trailer_only = false;
}

static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
						size_t act_len, size_t full_len,
						bool part_of_bundle,
						bool last_in_bundle)
{
	pkt->skb = dev_alloc_skb(full_len);
	if (!pkt->skb)
		return -ENOMEM;

	pkt->act_len = act_len;
	pkt->alloc_len = full_len;
	pkt->part_of_bundle = part_of_bundle;
	pkt->last_in_bundle = last_in_bundle;
	pkt->trailer_only = false;

	return 0;
}

static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
{
	bool trailer_only = false;
	struct ath10k_htc_hdr *htc_hdr =
		(struct ath10k_htc_hdr *)pkt->skb->data;
	u16 len = __le16_to_cpu(htc_hdr->len);

	if (len == htc_hdr->trailer_len)
		trailer_only = true;

	return trailer_only;
}

/* sdio/mmc functions */

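/* Build the 32-bit argument of an SD_IO_RW_DIRECT (CMD52) transaction
 * aimed at function 0. Per the SDIO spec the argument encodes the R/W
 * flag in bit 31, the function number in bits 30:28 (0 here), the RAW
 * (read-after-write) flag in bit 27, the 17-bit register address in
 * bits 25:9 and the data byte in bits 7:0. Bits 26 and 8 are "stuff"
 * bits in the spec; they are set to 1 below.
 */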
static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	*arg = FIELD_PREP(BIT(31), write) |
	       FIELD_PREP(BIT(27), raw) |
	       FIELD_PREP(BIT(26), 1) |
	       FIELD_PREP(GENMASK(25, 9), address) |
	       FIELD_PREP(BIT(8), 1) |
	       FIELD_PREP(GENMASK(7, 0), val);
}

static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char *byte)
{
	struct mmc_command io_cmd;
	int ret;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
	if (!ret)
		*byte = io_cmd.resp[0];

	return ret;
}

static int ath10k_sdio_config(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	unsigned char byte, asyncintdelay = 2;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");

	sdio_claim_host(func);

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      &byte);

	byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
	byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
			   ATH10K_SDIO_DRIVE_DTSX_TYPE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      byte);

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(
		func->card,
		CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
		&byte);

	byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
		goto out;
	}

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      &byte);

	byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
			    ret);
		goto out;
	}

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      &byte);

	byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
	byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      byte);

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
	if (ret) {
		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
			    ar_sdio->mbox_info.block_size, ret);
		goto out;
	}

out:
	sdio_release_host(func);
	return ret;
}

static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	sdio_writel(func, val, addr, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
			    val, addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
		   addr, val);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	__le32 *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = cpu_to_le32(val);

	sdio_claim_host(func);

	ret = sdio_writesb(func, addr, buf, sizeof(*buf));
	if (ret) {
		ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
			    val, addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
		   addr, val);

out:
	sdio_release_host(func);

	kfree(buf);

	return ret;
}

static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);
	*val = sdio_readl(func, addr, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
		   addr, *val);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	ret = sdio_memcpy_fromio(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	/* For some reason toio() doesn't have const for the buffer, need
	 * to cast it away.
	 */
	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
	if (ret) {
		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	len = round_down(len, ar_sdio->mbox_info.block_size);

	ret = sdio_readsb(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

/* HIF mbox functions */

static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
					      struct ath10k_sdio_rx_data *pkt,
					      u32 *lookaheads,
					      int *n_lookaheads)
{
	struct ath10k_htc *htc = &ar->htc;
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	enum ath10k_htc_ep_id eid;
	u16 payload_len;
	u8 *trailer;
	int ret;

	payload_len = le16_to_cpu(htc_hdr->len);
	skb->len = payload_len + sizeof(struct ath10k_htc_hdr);

	if (trailer_present) {
		trailer = skb->data + sizeof(*htc_hdr) +
			  payload_len - htc_hdr->trailer_len;

		eid = pipe_id_to_eid(htc_hdr->eid);

		ret = ath10k_htc_process_trailer(htc,
						 trailer,
						 htc_hdr->trailer_len,
						 eid,
						 lookaheads,
						 n_lookaheads);
		if (ret)
			return ret;

		if (is_trailer_only_msg(pkt))
			pkt->trailer_only = true;

		skb_trim(skb, skb->len - htc_hdr->trailer_len);
	}

	/* The payload starts after the HTC header */
	skb_pull(skb, sizeof(*htc_hdr));

	return 0;
}

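/* A note on lookaheads (a sketch of the mechanism, not normative): the
 * firmware reports the first 4 bytes of the next pending HTC message
 * (the "lookahead") either in the irq processing registers or in a
 * trailer appended to a received message. The host uses it to learn the
 * endpoint and length of the next message before actually fetching it,
 * so RX buffers can be sized and bundles detected without extra bus
 * reads.
 */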
static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
					       u32 lookaheads[],
					       int *n_lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_ep *ep;
	enum ath10k_htc_ep_id id;
	int ret, i, *n_lookahead_local;
	u32 *lookaheads_local;
	int lookahead_idx = 0;

	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		lookaheads_local = lookaheads;
		n_lookahead_local = n_lookahead;

		id = ((struct ath10k_htc_hdr *)
		      &lookaheads[lookahead_idx++])->eid;

		if (id >= ATH10K_HTC_EP_COUNT) {
			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
				    id);
			ret = -ENOMEM;
			goto out;
		}

		ep = &htc->endpoint[id];

		if (ep->service_id == 0) {
			ath10k_warn(ar, "ep %d is not connected\n", id);
			ret = -ENOMEM;
			goto out;
		}

		pkt = &ar_sdio->rx_pkts[i];

		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
			/* Only read lookaheads from RX trailers for the
			 * last packet in a bundle.
			 */
			lookahead_idx--;
			lookaheads_local = NULL;
			n_lookahead_local = NULL;
		}

		ret = ath10k_sdio_mbox_rx_process_packet(ar,
							 pkt,
							 lookaheads_local,
							 n_lookahead_local);
		if (ret)
			goto out;

		if (!pkt->trailer_only)
			ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
		else
			kfree_skb(pkt->skb);

		/* The RX complete handler now owns the skb */
		pkt->skb = NULL;
		pkt->alloc_len = 0;
	}

	ret = 0;

out:
	/* Free all packets that were not passed on to the HTC layer or
	 * that were released above in the error path.
	 */
	for (; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	return ret;
}

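/* RX bundling (hedged summary): to cut per-transfer overhead the
 * firmware can pack several HTC messages of the same padded length back
 * to back. The first message's HTC header carries the number of extra
 * messages in the ATH10K_HTC_FLAG_BUNDLE_MASK bits of the flags field,
 * which is what the helper below extracts with FIELD_GET().
 */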
static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
					     struct ath10k_sdio_rx_data *rx_pkts,
					     struct ath10k_htc_hdr *htc_hdr,
					     size_t full_len, size_t act_len,
					     size_t *bndl_cnt)
{
	int ret, i;

	*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);

	if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
		ath10k_warn(ar,
			    "HTC bundle length %u exceeds maximum %u\n",
			    le16_to_cpu(htc_hdr->len),
			    HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
		return -ENOMEM;
	}

	/* Allocate bndl_cnt extra RX buffers. All packets in a bundle have
	 * the same padded length, so the header of the first message can
	 * be used to size every buffer in the bundle.
	 */
	for (i = 0; i < *bndl_cnt; i++) {
		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
						    act_len,
						    full_len,
						    true,
						    false);
		if (ret)
			return ret;
	}

	return 0;
}

static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
				     u32 lookaheads[], int n_lookaheads)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc_hdr *htc_hdr;
	size_t full_len, act_len;
	bool last_in_bundle;
	int ret, i;

	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
		ath10k_warn(ar,
			    "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
			    n_lookaheads,
			    ATH10K_SDIO_MAX_RX_MSGS);
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < n_lookaheads; i++) {
		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
		last_in_bundle = false;

		if (le16_to_cpu(htc_hdr->len) >
		    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
			ath10k_warn(ar,
				    "payload length %d exceeds max htc length: %zu\n",
				    le16_to_cpu(htc_hdr->len),
				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
			ret = -ENOMEM;
			goto err;
		}

		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);

		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
			ath10k_warn(ar,
				    "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
				    htc_hdr->eid, htc_hdr->flags,
				    le16_to_cpu(htc_hdr->len));
			ret = -EINVAL;
			goto err;
		}

		if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
			/* HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			size_t bndl_cnt;

			ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
								&ar_sdio->rx_pkts[i],
								htc_hdr,
								full_len,
								act_len,
								&bndl_cnt);
			if (ret)
				goto err;

			n_lookaheads += bndl_cnt;
			i += bndl_cnt;

			/* next buffer will be the last in the bundle */
			last_in_bundle = true;
		}

		/* If the firmware has set the RECV_1MORE_BLOCK flag it
		 * wants the host to fetch one extra mbox block together
		 * with this message, so reserve room for it.
		 */
		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;

		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
						    act_len,
						    full_len,
						    last_in_bundle,
						    last_in_bundle);
		if (ret)
			goto err;
	}

	ar_sdio->n_rx_pkts = i;

	return 0;

err:
	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
		if (!ar_sdio->rx_pkts[i].alloc_len)
			break;
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
	}

	return ret;
}

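/* Fetch a single message from the mbox. The bus read always covers the
 * padded, block-aligned alloc_len; only the actual message length
 * (act_len) is committed to the skb afterwards.
 */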
static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
				      struct ath10k_sdio_rx_data *pkt)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sk_buff *skb = pkt->skb;
	int ret;

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 skb->data, pkt->alloc_len);
	pkt->status = ret;
	if (!ret)
		skb_put(skb, pkt->act_len);

	return ret;
}

static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret, i;

	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		ret = ath10k_sdio_mbox_rx_packet(ar,
						 &ar_sdio->rx_pkts[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Free all packets that were not successfully fetched. */
	for (; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	return ret;
}

/* This is the timeout for mailbox processing done in the sdio irq
 * handler. The timeout is deliberately set quite high since SDIO dump
 * logs over serial port can/will add a substantial overhead to the
 * processing (if enabled).
 */
#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)

static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
						  u32 msg_lookahead, bool *done)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
	int n_lookaheads = 1;
	unsigned long timeout;
	int ret;

	*done = true;

	/* Copy the lookahead obtained from the HTC register table into our
	 * temp array as a start value.
	 */
	lookaheads[0] = msg_lookahead;

	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
	do {
		/* Try to allocate as many HTC RX packets as indicated by
		 * n_lookaheads.
		 */
		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
						n_lookaheads);
		if (ret)
			break;

		if (ar_sdio->n_rx_pkts >= 2)
			/* A recv bundle was detected, force IRQ status
			 * re-check again.
			 */
			*done = false;

		ret = ath10k_sdio_mbox_rx_fetch(ar);

		/* Process fetched packets. This will potentially update
		 * n_lookaheads depending on if the packets contain
		 * lookahead reports.
		 */
		n_lookaheads = 0;
		ret = ath10k_sdio_mbox_rx_process_packets(ar,
							  lookaheads,
							  &n_lookaheads);

		if (!n_lookaheads || ret)
			break;

		/* The lookahead reports indicate that there are more
		 * messages available in the mbox, so run through the loop
		 * again and re-check the IRQ status registers before
		 * leaving IRQ processing. This can improve performance in
		 * high throughput situations.
		 */
		*done = false;
	} while (time_before(jiffies, timeout));

	if (ret && (ret != -ECANCELED))
		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
			    ret);

	return ret;
}

static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
{
	u32 val;
	int ret;

	/* TODO: Add firmware crash handling */
	ath10k_warn(ar, "firmware crashed\n");

	/* read counter to clear the interrupt, the debug error interrupt is
	 * counter 0.
	 */
	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
	if (ret)
		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);

	return ret;
}

static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 counter_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
			     irq_data->irq_en_reg->cntr_int_status_en;

	/* NOTE: other modules like GMBOX may use the counter interrupt
	 * for credit flow control on other counters. We only need to
	 * check for the debug assertion counter interrupt here.
	 */
	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
	else
		ret = 0;

	mutex_unlock(&irq_data->mtx);

	return ret;
}

static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 error_int_status;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");

	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
	if (!error_int_status) {
		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
			    error_int_status);
		return -EIO;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio error_int_status 0x%x\n", error_int_status);

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
		      error_int_status))
		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "rx underflow interrupt error\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "tx overflow interrupt error\n");

	/* Clear the interrupt */
	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;

	/* set W1C value to clear the interrupt, this hits the register first */
	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
				    error_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to error int status address: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 cpu_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
			 irq_data->irq_en_reg->cpu_int_status_en;
	if (!cpu_int_status) {
		ath10k_warn(ar, "CPU interrupt status is zero\n");
		ret = -EIO;
		goto out;
	}

	/* Clear the interrupt */
	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;

	/* The write is done as a 4-byte streaming access to keep the
	 * transfer 4-byte aligned, mitigating issues with host bus
	 * interconnects that restrict bus transfer lengths to a multiple
	 * of 4 bytes.
	 */
	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
				    cpu_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
			    ret);
		goto out;
	}

out:
	mutex_unlock(&irq_data->mtx);
	return ret;
}

static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
					    u8 *host_int_status,
					    u32 *lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
	int ret;

	mutex_lock(&irq_data->mtx);

	*lookahead = 0;
	*host_int_status = 0;

	/* int_status_en is supposed to be non zero, otherwise interrupts
	 * shouldn't be enabled. There is however a short time frame during
	 * initialization between the irq register and int_status_en init
	 * where this can happen.
	 * We silently ignore this condition.
	 */
	if (!irq_en_reg->int_status_en) {
		ret = 0;
		goto out;
	}

	/* Read the first sizeof(*irq_proc_reg) bytes of the HTC register
	 * table. This will yield the value of the different int status
	 * registers and the lookahead registers.
	 */
	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
			       irq_proc_reg, sizeof(*irq_proc_reg));
	if (ret)
		goto out;

	/* Update only those registers that are enabled */
	*host_int_status = irq_proc_reg->host_int_status &
			   irq_en_reg->int_status_en;

	/* Look at mbox status */
	if (!(*host_int_status & htc_mbox)) {
		*lookahead = 0;
		ret = 0;
		goto out;
	}

	/* Mask out pending mbox value, we use the lookahead as the real
	 * flag for mbox processing.
	 */
	*host_int_status &= ~htc_mbox;
	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
		*lookahead = le32_to_cpu(
			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
		if (!*lookahead)
			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
	}

out:
	mutex_unlock(&irq_data->mtx);
	return ret;
}

static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
					      bool *done)
{
	u8 host_int_status;
	u32 lookahead;
	int ret;

	/* NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */
	ret = ath10k_sdio_mbox_read_int_status(ar,
					       &host_int_status,
					       &lookahead);
	if (ret) {
		*done = true;
		goto out;
	}

	if (!host_int_status && !lookahead) {
		ret = 0;
		*done = true;
		goto out;
	}

	if (lookahead) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio pending mailbox msg lookahead 0x%08x\n",
			   lookahead);

		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
							     lookahead,
							     done);
		if (ret)
			goto out;
	}

	/* now, handle the rest of the interrupts */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio host_int_status 0x%x\n", host_int_status);

	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
		/* CPU Interrupt */
		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
		/* Error Interrupt */
		ret = ath10k_sdio_mbox_proc_err_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
		/* Counter Interrupt */
		ret = ath10k_sdio_mbox_proc_counter_intr(ar);

	ret = 0;

out:
	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily, which can re-wake the target: if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers.
	 *
	 * NOTE: host interfaces that poll for pending mbox messages
	 * cannot use this optimization due to possible side effects
	 * (e.g. SPI requires the host to drain all messages from the
	 * mailbox before exiting the ISR routine).
	 */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio pending irqs done %d status %d\n",
		   *done, ret);

	return ret;
}

static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;

	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;

	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;

	dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
	dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
	switch (dev_id_base) {
	case QCA_MANUFACTURER_ID_AR6005_BASE:
		if (dev_id_chiprev < 4)
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH;
		else
			/* from QCA6174 2.0(0x504), the width has been
			 * extended to 56K
			 */
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	case QCA_MANUFACTURER_ID_QCA9377_BASE:
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	default:
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH;
	}

	mbox_info->ext_info[1].htc_ext_addr =
		mbox_info->ext_info[0].htc_ext_addr +
		mbox_info->ext_info[0].htc_ext_sz +
		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
}

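/* BMI (Bootloader Messaging Interface) transfers, used for firmware
 * download and early target configuration. A rough outline of the flow
 * as implemented below: wait for the target to report a command credit,
 * write the request to the mbox, then poll RX_LOOKAHEAD_VALID until a
 * response is pending and read it back out of the mbox.
 */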
static int ath10k_sdio_bmi_credits(struct ath10k *ar)
{
	u32 addr, cmd_credits;
	unsigned long timeout;
	int ret;

	/* Read the counter register to get the command credits */
	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	cmd_credits = 0;

	while (time_before(jiffies, timeout) && !cmd_credits) {
		/* Hit the credit counter with a 4-byte access, the first
		 * byte read will hit the counter and cause a decrement,
		 * while the remaining 3 bytes have no effect. The rationale
		 * behind this is to make all HIF accesses 4-byte aligned.
		 */
		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
		if (ret) {
			ath10k_warn(ar,
				    "unable to decrement the command credit count register: %d\n",
				    ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes.
		 */
		cmd_credits &= 0xFF;
	}

	if (!cmd_credits) {
		ath10k_warn(ar, "bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
{
	unsigned long timeout;
	u32 rx_word;
	int ret;

	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	rx_word = 0;

	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath10k_sdio_read32(ar,
					 MBOX_HOST_INT_STATUS_ADDRESS,
					 &rx_word);
		if (ret) {
			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= 1;
	}

	if (!rx_word) {
		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
					void *req, u32 req_len,
					void *resp, u32 *resp_len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr;
	int ret;

	if (req) {
		ret = ath10k_sdio_bmi_credits(ar);
		if (ret)
			return ret;

		addr = ar_sdio->mbox_info.htc_addr;

		memcpy(ar_sdio->bmi_buf, req, req_len);
		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
		if (ret) {
			ath10k_warn(ar,
				    "unable to send the bmi data to the device: %d\n",
				    ret);
			return ret;
		}
	}

	if (!resp || !resp_len)
		/* No response expected */
		return 0;

	/* During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers, and it avoids a potential hang
	 * on interconnects that provide no indication of data timeout.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO, because the Target is unable to push
	 * more data into the FIFO until AFTER the Host posts an HIF Read
	 * and removes some of it. Fortunately, large BMI reads do not
	 * occur in practice -- they're supported for debug/development.
	 */
	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
	if (ret)
		return ret;

	/* We always read from the start of the mbox address */
	addr = ar_sdio->mbox_info.htc_addr;
	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read the bmi data from the device: %d\n",
			    ret);
		return ret;
	}

	memcpy(resp, ar_sdio->bmi_buf, *resp_len);

	return 0;
}

/* sdio async handling functions */

static struct ath10k_sdio_bus_request
*ath10k_sdio_alloc_busreq(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		bus_req = NULL;
		goto out;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct ath10k_sdio_bus_request, list);
	list_del(&bus_req->list);

out:
	spin_unlock_bh(&ar_sdio->lock);
	return bus_req;
}

static void ath10k_sdio_free_bus_req(struct ath10k *ar,
				     struct ath10k_sdio_bus_request *bus_req)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

	memset(bus_req, 0, sizeof(*bus_req));

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}

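/* Async write path (design note): writers queue a bus request on
 * wr_asyncq and kick the single-threaded SDIO workqueue, which drains
 * the queue in order. Serializing all writes on one worker keeps the
 * blocking SDIO I/O out of atomic contexts; completion is signalled
 * either via the HTC TX-complete callback (for HTC messages) or via an
 * optional struct completion.
 */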
static void __ath10k_sdio_write_async(struct ath10k *ar,
				      struct ath10k_sdio_bus_request *req)
{
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	int ret;

	skb = req->skb;
	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
	if (ret)
		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d\n",
			    req->address, ret);

	if (req->htc_msg) {
		ep = &ar->htc.endpoint[req->eid];
		ath10k_htc_notify_tx_completion(ep, skb);
	} else if (req->comp) {
		complete(req->comp);
	}

	ath10k_sdio_free_bus_req(ar, req);
}

static void ath10k_sdio_write_async_work(struct work_struct *work)
{
	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
						   wr_async_work);
	struct ath10k *ar = ar_sdio->ar;
	struct ath10k_sdio_bus_request *req, *tmp_req;

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		__ath10k_sdio_write_async(ar, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);
}

static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
				      struct sk_buff *skb,
				      struct completion *comp,
				      bool htc_msg, enum ath10k_htc_ep_id eid)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	/* Allocate a bus request for the message and queue it on the
	 * SDIO workqueue.
	 */
	bus_req = ath10k_sdio_alloc_busreq(ar);
	if (!bus_req) {
		ath10k_warn(ar,
			    "unable to allocate bus request for async request\n");
		return -ENOMEM;
	}

	bus_req->skb = skb;
	bus_req->eid = eid;
	bus_req->address = addr;
	bus_req->htc_msg = htc_msg;
	bus_req->comp = comp;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);

	return 0;
}

/* IRQ handler */

static void ath10k_sdio_irq_handler(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;
	unsigned long timeout;
	bool done = false;
	int ret;

	/* Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
	do {
		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
		if (ret)
			break;
	} while (time_before(jiffies, timeout) && !done);

	ath10k_mac_tx_push_pending(ar);

	sdio_claim_host(ar_sdio->func);

	if (ret && ret != -ECANCELED)
		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
			    ret);
}

/* sdio HIF functions */

static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs));
	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);

	mutex_unlock(&irq_data->mtx);

	return ret;
}

static int ath10k_sdio_hif_power_up(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	if (!ar_sdio->is_disabled)
		return 0;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/* Wait for hardware to initialise. It should take a while since
	 * sleep is still enabled at this point.
	 */
	msleep(20);

	ar_sdio->is_disabled = false;

	ret = ath10k_sdio_hif_disable_intrs(ar);
	if (ret)
		return ret;

	return 0;
}

static void ath10k_sdio_hif_power_down(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);

	ar_sdio->is_disabled = true;
}

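/* TX via the mbox window (hedged note): each message is written so that
 * its last byte lands on the final address of the endpoint's mbox
 * window. On these mbox-based Atheros interfaces, hitting the end of
 * the window is what signals end-of-message to the firmware, which is
 * why the write address below is computed backwards from
 * mbox_addr + mbox_size.
 */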
static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int ret, i;

	eid = pipe_id_to_eid(pipe_id);

	for (i = 0; i < n_items; i++) {
		size_t padded_len;
		u32 address;

		skb = items[i].transfer_context;
		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
							      skb->len);
		skb_trim(skb, padded_len);

		/* Write TX data to the end of the mbox address space */
		address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
			  skb->len;
		ret = ath10k_sdio_prep_async_req(ar, address, skb,
						 NULL, true, eid);
		if (ret)
			return ret;
	}

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	return 0;
}

static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	/* Enable all but CPU interrupts */
	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);

	/* NOTE: There are some cases where HIF can do detection of
	 * pending mbox messages which is disabled now.
	 */
	regs->int_status_en |=
		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);

	/* Set up the CPU Interrupt Status Register */
	regs->cpu_int_status_en = 0;

	/* Set up the Error Interrupt status Register */
	regs->err_int_status_en =
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);

	/* Enable Counter interrupt status register to get fatal errors for
	 * debugging.
	 */
	regs->cntr_int_status_en =
		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);

	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar,
			    "failed to update mbox interrupt status register: %d\n",
			    ret);

	mutex_unlock(&irq_data->mtx);
	return ret;
}

static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
	u32 val;
	int ret;

	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
	if (ret) {
		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
			    ret);
		return ret;
	}

	if (enable_sleep)
		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
	else
		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;

	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
	if (ret) {
		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

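/* Diagnostic window access (hedged summary): the target exposes a small
 * "window" in the mbox register space. Reads first program the target
 * address into the window-read address register and then pull the data
 * from the window data register; writes push the data first and then
 * program the window-write address, which is what actually starts the
 * write cycle on the target side.
 */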
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				     size_t buf_len)
{
	int ret;

	/* set window register to start read cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window read address: %d\n", ret);
		return ret;
	}

	/* read the data */
	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
	if (ret) {
		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
				       u32 *value)
{
	__le32 *val;
	int ret;

	val = kzalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
	if (ret)
		goto out;

	*value = __le32_to_cpu(*val);

out:
	kfree(val);

	return ret;
}

static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
					  const void *data, int nbytes)
{
	int ret;

	/* set write data */
	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
	if (ret) {
		ath10k_warn(ar,
			    "failed to write 0x%p to mbox window data address: %d\n",
			    data, ret);
		return ret;
	}

	/* set window register, which starts the write cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window write address: %d\n", ret);
		return ret;
	}

	return 0;
}

/* HIF start/stop */

static int ath10k_sdio_hif_start(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr, val;
	int ret;

	/* Sleep 20 ms before HIF interrupts are disabled.
	 * This will give the target plenty of time to process the BMI done
	 * request before interrupts are disabled.
	 */
	msleep(20);
	ret = ath10k_sdio_hif_disable_intrs(ar);
	if (ret)
		return ret;

	/* eid 0 always uses the lower part of the extended mailbox address
	 * space (ext_info[0].htc_ext_addr).
	 */
	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
	if (ret) {
		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return ret;
	}

	sdio_release_host(ar_sdio->func);

	ret = ath10k_sdio_hif_enable_intrs(ar);
	if (ret)
		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);

	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));

	ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
	if (ret) {
		ath10k_warn(ar, "unable to read hi_acs_flags address: %d\n", ret);
		return ret;
	}

	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service enabled\n");
		ar_sdio->swap_mbox = true;
	}

	/* Enable sleep and then disable it again */
	ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
	if (ret)
		return ret;

	/* Wait for 20ms for the written value to take effect */
	msleep(20);

	ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
	if (ret)
		return ret;

	return 0;
}


#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)

static void ath10k_sdio_irq_disable(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	struct sk_buff *skb;
	struct completion irqs_disabled_comp;
	int ret;

	skb = dev_alloc_skb(sizeof(*regs));
	if (!skb)
		return;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
	memcpy(skb->data, regs, sizeof(*regs));
	skb_put(skb, sizeof(*regs));

	mutex_unlock(&irq_data->mtx);

	init_completion(&irqs_disabled_comp);
	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
					 skb, &irqs_disabled_comp, false, 0);
	if (ret)
		goto out;

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	/* Wait for the completion of the IRQ disable request.
	 * If there is a timeout we will try to disable irq's anyway.
	 */
	ret = wait_for_completion_timeout(&irqs_disabled_comp,
					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
	if (!ret)
		ath10k_warn(ar, "sdio irq disable request timed out\n");

	sdio_claim_host(ar_sdio->func);

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);

	sdio_release_host(ar_sdio->func);

out:
	kfree_skb(skb);
}

static void ath10k_sdio_hif_stop(struct ath10k *ar)
{
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

	ath10k_sdio_irq_disable(ar);

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	/* Free all bus requests that have not been handled */
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		struct ath10k_htc_ep *ep;

		list_del(&req->list);

		if (req->htc_msg) {
			ep = &ar->htc.endpoint[req->eid];
			ath10k_htc_notify_tx_completion(ep, req->skb);
		} else if (req->skb) {
			kfree_skb(req->skb);
		}
		ath10k_sdio_free_bus_req(ar, req);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);
}

#ifdef CONFIG_PM

static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
	return -EOPNOTSUPP;
}

static int ath10k_sdio_hif_resume(struct ath10k *ar)
{
	switch (ar->state) {
	case ATH10K_STATE_OFF:
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath10k_sdio_config(ar);
		break;

	case ATH10K_STATE_ON:
	default:
		break;
	}

	return 0;
}
#endif

static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
	enum ath10k_htc_ep_id eid;
	bool ep_found = false;
	int i;

	/* For sdio, we are interested in the mapping between eid
	 * and pipeid rather than service_id to pipe_id.
	 * First we find out which eid has been allocated to the
	 * service.
	 */
	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->endpoint[i].service_id == service_id) {
			eid = htc->endpoint[i].eid;
			ep_found = true;
			break;
		}
	}

	if (!ep_found)
		return -EINVAL;

	/* Then we create the simplest mapping possible between pipeid
	 * and eid.
	 */
	*ul_pipe = *dl_pipe = (u8)eid;

	/* Normally, HTT will use the upper part of the extended mailbox
	 * address space (ext_info[1].htc_ext_addr) and WMI ctrl will use
	 * the lower part (ext_info[0].htc_ext_addr). If the fw wants
	 * swapping of mailbox addresses, the opposite is true.
	 */
	if (ar_sdio->swap_mbox) {
		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
	} else {
		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
	}

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		/* HTC ctrl ep mbox address has already been setup in
		 * ath10k_sdio_hif_start().
		 */
		break;
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		ar_sdio->mbox_addr[eid] = wmi_addr;
		ar_sdio->mbox_size[eid] = wmi_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		ar_sdio->mbox_addr[eid] = htt_addr;
		ar_sdio->mbox_size[eid] = htt_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	default:
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    service_id);
		return -EINVAL;
	}

	return 0;
}

static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");

	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
	 * case) == 0.
	 */
	*ul_pipe = 0;
	*dl_pipe = 0;
}

/* This op is currently only used by htc_wait_target if the HTC ready
 * message times out. It is not applicable for SDIO since there is nothing
 * we can do if the HTC ready message does not arrive in time.
 * TODO: Make this op non mandatory by introducing a NULL check in the
 * hif op wrapper.
 */
static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
						u8 pipe, int force)
{
}

static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
	.tx_sg			= ath10k_sdio_hif_tx_sg,
	.diag_read		= ath10k_sdio_hif_diag_read,
	.diag_write		= ath10k_sdio_hif_diag_write_mem,
	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
	.start			= ath10k_sdio_hif_start,
	.stop			= ath10k_sdio_hif_stop,
	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
	.send_complete_check	= ath10k_sdio_hif_send_complete_check,
	.power_up		= ath10k_sdio_hif_power_up,
	.power_down		= ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_sdio_hif_suspend,
	.resume			= ath10k_sdio_hif_resume,
#endif
};
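
/* These ops are invoked by the bus-agnostic ath10k core through the thin
 * wrappers in hif.h (ath10k_hif_tx_sg(), ath10k_hif_power_up() and
 * friends), which is how the core drives the SDIO-specific
 * implementation above.
 */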

#ifdef CONFIG_PM_SLEEP

/* Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */
static int ath10k_sdio_pm_suspend(struct device *device)
{
	return 0;
}

static int ath10k_sdio_pm_resume(struct device *device)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
			 ath10k_sdio_pm_resume);

#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)

#else

#define ATH10K_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

static int ath10k_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	struct ath10k_sdio *ar_sdio;
	struct ath10k *ar;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id, dev_id_base;
	int ret, i;

	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
	 * If there will be newer chipsets that do not use the hw reg
	 * setup as defined in qca6174_regs and qca6174_values, this
	 * assumption is no longer valid and hw_rev must be setup
	 * differently depending on chipset.
	 */
	hw_rev = ATH10K_HW_QCA6174;

	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
				hw_rev, &ath10k_sdio_hif_ops);
	if (!ar) {
		dev_err(&func->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = ath10k_sdio_priv(ar);

	ar_sdio->irq_data.irq_proc_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_proc_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->irq_data.irq_en_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_en_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->bmi_buf) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->is_disabled = true;
	ar_sdio->ar = ar;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->irq_data.mtx);

	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
	if (!ar_sdio->workqueue) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

	dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
	switch (dev_id_base) {
	case QCA_MANUFACTURER_ID_AR6005_BASE:
	case QCA_MANUFACTURER_ID_QCA9377_BASE:
		ar->dev_id = QCA9377_1_0_DEVICE_ID;
		break;
	default:
		ret = -ENODEV;
		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
			   dev_id_base, id->device);
		goto err_free_wq;
	}

	ar->id.vendor = id->vendor;
	ar->id.device = id->device;

	ath10k_sdio_set_mbox_info(ar);

	ret = ath10k_sdio_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to config sdio: %d\n", ret);
		goto err_free_wq;
	}

	/* TODO: don't know yet how to get chip_id with SDIO */
	chip_id = 0;
	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_wq;
	}

	/* TODO: remove this once SDIO support is fully implemented */
	ath10k_warn(ar, "WARNING: ath10k SDIO support is incomplete, don't expect anything to work!\n");

	return 0;

err_free_wq:
	destroy_workqueue(ar_sdio->workqueue);
err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_sdio_remove(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	(void)ath10k_sdio_hif_disable_intrs(ar);
	cancel_work_sync(&ar_sdio->wr_async_work);
	ath10k_core_unregister(ar);
	ath10k_core_destroy(ar);
}

static const struct sdio_device_id ath10k_sdio_devices[] = {
	{SDIO_DEVICE(QCA_MANUFACTURER_CODE,
		     (QCA_SDIO_ID_AR6005_BASE | 0xA))},
	{SDIO_DEVICE(QCA_MANUFACTURER_CODE,
		     (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);

static struct sdio_driver ath10k_sdio_driver = {
	.name = "ath10k_sdio",
	.id_table = ath10k_sdio_devices,
	.probe = ath10k_sdio_probe,
	.remove = ath10k_sdio_remove,
	.drv.pm = ATH10K_SDIO_PM_OPS,
};

static int __init ath10k_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath10k_sdio_driver);
	if (ret)
		pr_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath10k_sdio_exit(void)
{
	sdio_unregister_driver(&ath10k_sdio_driver);
}

module_init(ath10k_sdio_init);
module_exit(ath10k_sdio_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");