#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include <linux/bitfield.h>
#include "core.h"
#include "bmi.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"

#define ATH10K_SDIO_VSG_BUF_SIZE	(64 * 1024)

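/* inlined helper functions */
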
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
						   size_t len)
{
	return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
}

static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
{
	return (enum ath10k_htc_ep_id)pipe_id;
}

static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
{
	dev_kfree_skb(pkt->skb);
	pkt->skb = NULL;
	pkt->alloc_len = 0;
	pkt->act_len = 0;
	pkt->trailer_only = false;
}

static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
						size_t act_len, size_t full_len,
						bool part_of_bundle,
						bool last_in_bundle)
{
	pkt->skb = dev_alloc_skb(full_len);
	if (!pkt->skb)
		return -ENOMEM;

	pkt->act_len = act_len;
	pkt->alloc_len = full_len;
	pkt->part_of_bundle = part_of_bundle;
	pkt->last_in_bundle = last_in_bundle;
	pkt->trailer_only = false;

	return 0;
}

static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
{
	bool trailer_only = false;
	struct ath10k_htc_hdr *htc_hdr =
		(struct ath10k_htc_hdr *)pkt->skb->data;
	u16 len = __le16_to_cpu(htc_hdr->len);

	if (len == htc_hdr->trailer_len)
		trailer_only = true;

	return trailer_only;
}

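/* sdio/mmc functions */
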
static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	*arg = FIELD_PREP(BIT(31), write) |
	       FIELD_PREP(BIT(27), raw) |
	       FIELD_PREP(BIT(26), 1) |
	       FIELD_PREP(GENMASK(25, 9), address) |
	       FIELD_PREP(BIT(8), 1) |
	       FIELD_PREP(GENMASK(7, 0), val);
}

static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char *byte)
{
	struct mmc_command io_cmd;
	int ret;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
	if (!ret)
		*byte = io_cmd.resp[0];

	return ret;
}

static int ath10k_sdio_config(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	unsigned char byte, asyncintdelay = 2;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");

	sdio_claim_host(func);

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      &byte);

	byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
	byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
			   ATH10K_SDIO_DRIVE_DTSX_TYPE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      SDIO_CCCR_DRIVE_STRENGTH,
					      byte);

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(
		func->card,
		CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
		&byte);

	byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
		 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
		goto out;
	}

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      &byte);

	byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_IRQ_MODE_REG_SDIO3,
					      byte);
	if (ret) {
		ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
			    ret);
		goto out;
	}

	byte = 0;
	ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      &byte);

	byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
	byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);

	ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
					      CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
					      byte);

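	/* give us some time to enable, in ms */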
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
	if (ret) {
		ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
			    ar_sdio->mbox_info.block_size, ret);
		goto out;
	}

out:
	sdio_release_host(func);
	return ret;
}

static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	sdio_writel(func, val, addr, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
			    val, addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
		   addr, val);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	__le32 *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = cpu_to_le32(val);

	sdio_claim_host(func);

	ret = sdio_writesb(func, addr, buf, sizeof(*buf));
	if (ret) {
		ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
			    val, addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
		   addr, val);

out:
	sdio_release_host(func);

	kfree(buf);

	return ret;
}

static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);
	*val = sdio_readl(func, addr, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
		   addr, *val);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	ret = sdio_memcpy_fromio(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

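	/* sdio_memcpy_toio() doesn't take a const buffer, so the const
	 * qualifier has to be cast away here to avoid a compiler warning.
	 */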
	ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
	if (ret) {
		ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	len = round_down(len, ar_sdio->mbox_info.block_size);

	ret = sdio_readsb(func, buf, addr, len);
	if (ret) {
		ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
			    addr, ret);
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
		   addr, buf, len);
	ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);

out:
	sdio_release_host(func);

	return ret;
}

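/* HIF mbox functions */
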
static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
					      struct ath10k_sdio_rx_data *pkt,
					      u32 *lookaheads,
					      int *n_lookaheads)
{
	struct ath10k_htc *htc = &ar->htc;
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	enum ath10k_htc_ep_id eid;
	u8 *trailer;
	int ret;

	if (trailer_present) {
		trailer = skb->data + skb->len - htc_hdr->trailer_len;

		eid = pipe_id_to_eid(htc_hdr->eid);

		ret = ath10k_htc_process_trailer(htc,
						 trailer,
						 htc_hdr->trailer_len,
						 eid,
						 lookaheads,
						 n_lookaheads);
		if (ret)
			return ret;

		if (is_trailer_only_msg(pkt))
			pkt->trailer_only = true;

		skb_trim(skb, skb->len - htc_hdr->trailer_len);
	}

	skb_pull(skb, sizeof(*htc_hdr));

	return 0;
}

static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
					       u32 lookaheads[],
					       int *n_lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_ep *ep;
	struct ath10k_skb_rxcb *cb;
	enum ath10k_htc_ep_id id;
	int ret, i, *n_lookahead_local;
	u32 *lookaheads_local;
	int lookahead_idx = 0;

	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		lookaheads_local = lookaheads;
		n_lookahead_local = n_lookahead;

		id = ((struct ath10k_htc_hdr *)
		      &lookaheads[lookahead_idx++])->eid;

		if (id >= ATH10K_HTC_EP_COUNT) {
			ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
				    id);
			ret = -ENOMEM;
			goto out;
		}

		ep = &htc->endpoint[id];

		if (ep->service_id == 0) {
			ath10k_warn(ar, "ep %d is not connected\n", id);
			ret = -ENOMEM;
			goto out;
		}

		pkt = &ar_sdio->rx_pkts[i];

		if (pkt->part_of_bundle && !pkt->last_in_bundle) {
			/* Only read lookaheads from RX trailers for the
			 * last packet in a bundle.
			 */
			lookahead_idx--;
			lookaheads_local = NULL;
			n_lookahead_local = NULL;
		}

		ret = ath10k_sdio_mbox_rx_process_packet(ar,
							 pkt,
							 lookaheads_local,
							 n_lookahead_local);
		if (ret)
			goto out;

		if (!pkt->trailer_only) {
			cb = ATH10K_SKB_RXCB(pkt->skb);
			cb->eid = id;

			skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
			queue_work(ar->workqueue_aux,
				   &ar_sdio->async_work_rx);
		} else {
			kfree_skb(pkt->skb);
		}

		/* The RX completion worker now owns the skb */
		pkt->skb = NULL;
		pkt->alloc_len = 0;
	}

	ret = 0;

out:
	/* Free all packets that were not passed on to the RX completion
	 * handler.
	 */
	for (; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	return ret;
}

static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
					 struct ath10k_sdio_rx_data *rx_pkts,
					 struct ath10k_htc_hdr *htc_hdr,
					 size_t full_len, size_t act_len,
					 size_t *bndl_cnt)
{
	int ret, i;
	u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;

	*bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);

	if (*bndl_cnt > max_msgs) {
		ath10k_warn(ar,
			    "HTC bundle length %u exceeds maximum %u\n",
			    le16_to_cpu(htc_hdr->len),
			    max_msgs);
		return -ENOMEM;
	}

	/* Allocate all bundled packets up front. The lookahead only carries
	 * the first HTC header of the bundle, so the actual lengths are
	 * patched later, once the whole bundle has been fetched.
	 */
	for (i = 0; i < *bndl_cnt; i++) {
		ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
						    act_len,
						    full_len,
						    true,
						    false);
		if (ret)
			return ret;
	}

	return 0;
}

static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
				     u32 lookaheads[], int n_lookaheads)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc_hdr *htc_hdr;
	size_t full_len, act_len;
	bool last_in_bundle;
	int ret, i;
	int pkt_cnt = 0;

	if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
		ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
			    n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < n_lookaheads; i++) {
		htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
		last_in_bundle = false;

		if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
			ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
				    le16_to_cpu(htc_hdr->len),
				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
			ret = -ENOMEM;
			goto err;
		}

		act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
		full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);

		if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
			ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
				    htc_hdr->eid, htc_hdr->flags,
				    le16_to_cpu(htc_hdr->len));
			ret = -EINVAL;
			goto err;
		}

		if (ath10k_htc_get_bundle_count(
			ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
			/* The HTC header flags indicate that every packet to
			 * follow has the same padded length so that it can
			 * be optimally fetched as a full bundle.
			 */
			size_t bndl_cnt;

			ret = ath10k_sdio_mbox_alloc_bundle(ar,
							    &ar_sdio->rx_pkts[pkt_cnt],
							    htc_hdr,
							    full_len,
							    act_len,
							    &bndl_cnt);

			if (ret) {
				ath10k_warn(ar, "failed to allocate a bundle: %d\n",
					    ret);
				goto err;
			}

			pkt_cnt += bndl_cnt;

			/* next buffer will be the last in the bundle */
			last_in_bundle = true;
		}

		/* Allocate skb for packet. If the packet had the
		 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
		 * packet skbs have already been allocated above.
		 */
		if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
			full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;

		ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
						    act_len,
						    full_len,
						    last_in_bundle,
						    last_in_bundle);
		if (ret) {
			ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
			goto err;
		}

		pkt_cnt++;
	}

	ar_sdio->n_rx_pkts = pkt_cnt;

	return 0;

err:
	for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
		if (!ar_sdio->rx_pkts[i].alloc_len)
			break;
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
	}

	return ret;
}

static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
	struct sk_buff *skb = pkt->skb;
	struct ath10k_htc_hdr *htc_hdr;
	int ret;

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 skb->data, pkt->alloc_len);
	if (ret)
		goto err;

	htc_hdr = (struct ath10k_htc_hdr *)skb->data;
	pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

	if (pkt->act_len > pkt->alloc_len) {
		ret = -EINVAL;
		goto err;
	}

	skb_put(skb, pkt->act_len);
	return 0;

err:
	ar_sdio->n_rx_pkts = 0;
	ath10k_sdio_mbox_free_rx_pkt(pkt);

	return ret;
}

static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_rx_data *pkt;
	struct ath10k_htc_hdr *htc_hdr;
	int ret, i;
	u32 pkt_offset, virt_pkt_len;

	virt_pkt_len = 0;
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;

	if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
		ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
		ret = -E2BIG;
		goto err;
	}

	ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
				 ar_sdio->vsg_buffer, virt_pkt_len);
	if (ret) {
		ath10k_warn(ar, "failed to read bundle packets: %d", ret);
		goto err;
	}

	pkt_offset = 0;
	for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
		pkt = &ar_sdio->rx_pkts[i];
		htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
		pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

		if (pkt->act_len > pkt->alloc_len) {
			ret = -EINVAL;
			goto err;
		}

		skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
		pkt_offset += pkt->alloc_len;
	}

	return 0;

err:
	/* Free all packets that were not successfully fetched. */
	for (i = 0; i < ar_sdio->n_rx_pkts; i++)
		ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

	ar_sdio->n_rx_pkts = 0;

	return ret;
}

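/* This is the timeout for mailbox processing done in the sdio irq
 * handler. The timeout is deliberately set quite high since SDIO dump logs
 * over serial port can/will add a substantial overhead to the processing
 * (if enabled).
 */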
#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)

static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
						  u32 msg_lookahead, bool *done)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
	int n_lookaheads = 1;
	unsigned long timeout;
	int ret;

	*done = true;

	/* Copy the lookahead obtained from the HTC register table into our
	 * temp array as a start value.
	 */
	lookaheads[0] = msg_lookahead;

	timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
	do {
		/* Try to allocate as many HTC RX packets indicated by
		 * n_lookaheads.
		 */
		ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
						n_lookaheads);
		if (ret)
			break;

		if (ar_sdio->n_rx_pkts >= 2)
			/* A recv bundle was detected, force IRQ status
			 * re-check again.
			 */
			*done = false;

		if (ar_sdio->n_rx_pkts > 1)
			ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
		else
			ret = ath10k_sdio_mbox_rx_fetch(ar);

		/* Process fetched packets. This will potentially update
		 * n_lookaheads depending on if the packets contain lookahead
		 * reports.
		 */
		n_lookaheads = 0;
		ret = ath10k_sdio_mbox_rx_process_packets(ar,
							  lookaheads,
							  &n_lookaheads);

		if (!n_lookaheads || ret)
			break;

		/* The lookahead reports indicate more messages pending in
		 * the mailbox, so loop again and set the flag to re-check
		 * the IRQ status registers before leaving IRQ processing.
		 * This can net better performance in high-throughput
		 * situations.
		 */
		*done = false;
	} while (time_before(jiffies, timeout));

	if (ret && (ret != -ECANCELED))
		ath10k_warn(ar, "failed to get pending recv messages: %d\n",
			    ret);

	return ret;
}

static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
{
	u32 val;
	int ret;

	/* TODO: Add firmware crash handling */
	ath10k_warn(ar, "firmware crashed\n");

	/* read counter to clear the interrupt, the debug error interrupt is
	 * counter 0.
	 */
	ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
	if (ret)
		ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);

	return ret;
}

static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 counter_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	counter_int_status = irq_data->irq_proc_reg->counter_int_status &
			     irq_data->irq_en_reg->cntr_int_status_en;

	/* NOTE: other modules like GMBOX may use the counter interrupt for
	 * credit flow control on other counters. We only need to check for
	 * the debug assertion counter interrupt.
	 */
	if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
		ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
	else
		ret = 0;

	mutex_unlock(&irq_data->mtx);

	return ret;
}

static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 error_int_status;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");

	error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
	if (!error_int_status) {
		ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
			    error_int_status);
		return -EIO;
	}

	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio error_int_status 0x%x\n", error_int_status);

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
		      error_int_status))
		ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "rx underflow interrupt error\n");

	if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
		      error_int_status))
		ath10k_warn(ar, "tx overflow interrupt error\n");

	/* Clear the interrupt */
	irq_data->irq_proc_reg->error_int_status &= ~error_int_status;

	/* set W1C value to clear the interrupt, this hits the register first */
	ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
				    error_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to error int status address: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	u8 cpu_int_status;
	int ret;

	mutex_lock(&irq_data->mtx);
	cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
			 irq_data->irq_en_reg->cpu_int_status_en;
	if (!cpu_int_status) {
		ath10k_warn(ar, "CPU interrupt status is zero\n");
		ret = -EIO;
		goto out;
	}

	/* Clear the interrupt */
	irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;

	/* Set up the register transfer buffer to hit the register 4 times,
	 * this is done to make the access 4-byte aligned to mitigate issues
	 * with host bus interconnects that restrict bus transfer lengths to
	 * be a multiple of 4-bytes.
	 *
	 * Set W1C value to clear the interrupt, this hits the register first.
	 */
	ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
				    cpu_int_status);
	if (ret) {
		ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
			    ret);
		goto out;
	}

out:
	mutex_unlock(&irq_data->mtx);
	if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK) {
		ath10k_err(ar, "firmware crashed!\n");
		queue_work(ar->workqueue, &ar->restart_work);
	}
	return ret;
}

static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
					    u8 *host_int_status,
					    u32 *lookahead)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
	struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
	u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
	int ret;

	mutex_lock(&irq_data->mtx);

	*lookahead = 0;
	*host_int_status = 0;

	/* int_status_en is supposed to be non zero, otherwise interrupts
	 * shouldn't be enabled. There is however a short time frame during
	 * initialization between the irq register and int_status_en init
	 * where this can happen.
	 * We silently ignore this condition.
	 */
	if (!irq_en_reg->int_status_en) {
		ret = 0;
		goto out;
	}

	/* Read the first sizeof(struct ath10k_irq_proc_registers)
	 * bytes of the HTC register table. This will yield us the value of
	 * the different int status registers and the lookahead registers.
	 */
	ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
			       irq_proc_reg, sizeof(*irq_proc_reg));
	if (ret) {
		queue_work(ar->workqueue, &ar->restart_work);
		ath10k_warn(ar, "read int status fail, start recovery\n");
		goto out;
	}

	/* Update only those registers that are enabled */
	*host_int_status = irq_proc_reg->host_int_status &
			   irq_en_reg->int_status_en;

	/* Look at mbox status */
	if (!(*host_int_status & htc_mbox)) {
		*lookahead = 0;
		ret = 0;
		goto out;
	}

	/* Mask out pending mbox value, we use the lookahead as
	 * the real flag for mbox processing.
	 */
	*host_int_status &= ~htc_mbox;
	if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
		*lookahead = le32_to_cpu(
			irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
		if (!*lookahead)
			ath10k_warn(ar, "sdio mbox lookahead is zero\n");
	}

out:
	mutex_unlock(&irq_data->mtx);
	return ret;
}

static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
					      bool *done)
{
	u8 host_int_status;
	u32 lookahead;
	int ret;

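	/* NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */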
	ret = ath10k_sdio_mbox_read_int_status(ar,
					       &host_int_status,
					       &lookahead);
	if (ret) {
		*done = true;
		goto out;
	}

	if (!host_int_status && !lookahead) {
		ret = 0;
		*done = true;
		goto out;
	}

	if (lookahead) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio pending mailbox msg lookahead 0x%08x\n",
			   lookahead);

		ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
							     lookahead,
							     done);
		if (ret)
			goto out;
	}

	/* now, handle the rest of the interrupts */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio host_int_status 0x%x\n", host_int_status);

	if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
		/* CPU Interrupt */
		ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
		/* Error Interrupt */
		ret = ath10k_sdio_mbox_proc_err_intr(ar);
		if (ret)
			goto out;
	}

	if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
		/* Counter Interrupt */
		ret = ath10k_sdio_mbox_proc_counter_intr(ar);

	ret = 0;

out:
	/* An optimization to bypass reading the IRQ status registers
	 * unnecessarily, which can re-wake the target: if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers.
	 *
	 * NOTE: host interfaces that rely on detecting pending mbox
	 * messages at the hif level cannot use this optimization due to
	 * possible side effects; SPI, for instance, requires the host to
	 * drain all messages from the mailbox before exiting the ISR.
	 */
	ath10k_dbg(ar, ATH10K_DBG_SDIO,
		   "sdio pending irqs done %d status %d",
		   *done, ret);

	return ret;
}

static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
	u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;

	mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
	mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
	mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
	mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;

	mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;

	dev_id_base = (device & 0x0F00);
	dev_id_chiprev = (device & 0x00FF);
	switch (dev_id_base) {
	case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
		if (dev_id_chiprev < 4)
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH;
		else
			/* from QCA6174 2.0(0x504), the width has been extended
			 * to 56K
			 */
			mbox_info->ext_info[0].htc_ext_sz =
				ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
		break;
	default:
		mbox_info->ext_info[0].htc_ext_sz =
			ATH10K_HIF_MBOX0_EXT_WIDTH;
	}

	mbox_info->ext_info[1].htc_ext_addr =
		mbox_info->ext_info[0].htc_ext_addr +
		mbox_info->ext_info[0].htc_ext_sz +
		ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
	mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
}

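/* BMI functions */
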
static int ath10k_sdio_bmi_credits(struct ath10k *ar)
{
	u32 addr, cmd_credits;
	unsigned long timeout;
	int ret;

	/* Read the counter register to get the command credits */
	addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	cmd_credits = 0;

	while (time_before(jiffies, timeout) && !cmd_credits) {
		/* Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes have no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
		if (ret) {
			ath10k_warn(ar,
				    "unable to decrement the command credit count register: %d\n",
				    ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		cmd_credits &= 0xFF;
	}

	if (!cmd_credits) {
		ath10k_warn(ar, "bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
{
	unsigned long timeout;
	u32 rx_word;
	int ret;

	timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	rx_word = 0;

	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath10k_sdio_read32(ar,
					 MBOX_HOST_INT_STATUS_ADDRESS,
					 &rx_word);
		if (ret) {
			ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= 1;
	}

	if (!rx_word) {
		ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
					void *req, u32 req_len,
					void *resp, u32 *resp_len)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr;
	int ret;

	if (req) {
		ret = ath10k_sdio_bmi_credits(ar);
		if (ret)
			return ret;

		addr = ar_sdio->mbox_info.htc_addr;

		memcpy(ar_sdio->bmi_buf, req, req_len);
		ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
		if (ret) {
			ath10k_warn(ar,
				    "unable to send the bmi data to the device: %d\n",
				    ret);
			return ret;
		}
	}

	if (!resp || !resp_len)
		/* No response expected */
		return 0;

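	/* During normal bootup, small reads may be required. Rather than
	 * issue an HIF Read and then wait as the Target adds successive
	 * bytes to the FIFO, we wait here until we know that response data
	 * is available.
	 *
	 * This allows us to cleanly timeout on an unexpected Target failure
	 * rather than risk problems at the HIF level. In particular, this
	 * avoids SDIO timeouts and possibly garbage data on some host
	 * controllers, and a potential hang on interconnects that give no
	 * indication of a data timeout.
	 *
	 * Synchronization is more difficult for reads larger than the size
	 * of the MBOX FIFO (128B), because the Target is unable to push the
	 * 129th byte of data until AFTER the Host posts an HIF Read and
	 * removes some FIFO data. So for large reads the Host posts the HIF
	 * Read BEFORE all the data is actually available. Fortunately, large
	 * BMI reads do not occur in practice (they are supported for debug
	 * and development purposes only).
	 */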
	ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
	if (ret)
		return ret;

	/* We always read from the start of the mbox address */
	addr = ar_sdio->mbox_info.htc_addr;
	ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read the bmi data from the device: %d\n",
			    ret);
		return ret;
	}

	memcpy(resp, ar_sdio->bmi_buf, *resp_len);

	return 0;
}

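/* sdio async handling functions */
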
static struct ath10k_sdio_bus_request
*ath10k_sdio_alloc_busreq(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		bus_req = NULL;
		goto out;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct ath10k_sdio_bus_request, list);
	list_del(&bus_req->list);

out:
	spin_unlock_bh(&ar_sdio->lock);
	return bus_req;
}

static void ath10k_sdio_free_bus_req(struct ath10k *ar,
				     struct ath10k_sdio_bus_request *bus_req)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

	memset(bus_req, 0, sizeof(*bus_req));

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}

static void __ath10k_sdio_write_async(struct ath10k *ar,
				      struct ath10k_sdio_bus_request *req)
{
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	int ret;

	skb = req->skb;
	ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
	if (ret)
		ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
			    req->address, ret);

	if (req->htc_msg) {
		ep = &ar->htc.endpoint[req->eid];
		ath10k_htc_notify_tx_completion(ep, skb);
	} else if (req->comp) {
		complete(req->comp);
	}

	ath10k_sdio_free_bus_req(ar, req);
}

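/* To improve throughput, use a workqueue to deliver packets to the HTC
 * layer; this way the SDIO bus is utilized much better.
 */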
static void ath10k_rx_indication_async_work(struct work_struct *work)
{
	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
						   async_work_rx);
	struct ath10k *ar = ar_sdio->ar;
	struct ath10k_htc_ep *ep;
	struct ath10k_skb_rxcb *cb;
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&ar_sdio->rx_head);
		if (!skb)
			break;
		cb = ATH10K_SKB_RXCB(skb);
		ep = &ar->htc.endpoint[cb->eid];
		ep->ep_ops.ep_rx_complete(ar, skb);
	}

	if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
		napi_schedule(&ar->napi);
}

static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
{
	struct ath10k *ar = ar_sdio->ar;
	unsigned char rtc_state = 0;
	int ret = 0;

	rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
	if (ret) {
		ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
		return ret;
	}

	*state = rtc_state & 0x3;

	return ret;
}

static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 val;
	int retry = ATH10K_CIS_READ_RETRY, ret = 0;
	unsigned char rtc_state = 0;

	sdio_claim_host(ar_sdio->func);

	ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
	if (ret) {
		ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
			    ret);
		goto release;
	}

	if (enable_sleep) {
		val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
		ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
	} else {
		val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
		ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
	}

	ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
	if (ret) {
		ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
			    ret);
	}

	if (!enable_sleep) {
		do {
			udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
			ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);

			if (ret) {
				ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
				break;
			}

			ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
				   rtc_state);

			if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
				break;

			udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
			retry--;
		} while (retry > 0);
	}

release:
	sdio_release_host(ar_sdio->func);

	return ret;
}

static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
{
	struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);

	ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
}

static void ath10k_sdio_write_async_work(struct work_struct *work)
{
	struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
						   wr_async_work);
	struct ath10k *ar = ar_sdio->ar;
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);

		if (req->address >= mbox_info->htc_addr &&
		    ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
			ath10k_sdio_set_mbox_sleep(ar, false);
			mod_timer(&ar_sdio->sleep_timer, jiffies +
				  msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
		}

		__ath10k_sdio_write_async(ar, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
		ath10k_sdio_set_mbox_sleep(ar, true);
}

static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
				      struct sk_buff *skb,
				      struct completion *comp,
				      bool htc_msg, enum ath10k_htc_ep_id eid)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_bus_request *bus_req;

	/* Allocate a bus request for the message and queue it on the
	 * SDIO workqueue.
	 */
	bus_req = ath10k_sdio_alloc_busreq(ar);
	if (!bus_req) {
		ath10k_warn(ar,
			    "unable to allocate bus request for async request\n");
		return -ENOMEM;
	}

	bus_req->skb = skb;
	bus_req->eid = eid;
	bus_req->address = addr;
	bus_req->htc_msg = htc_msg;
	bus_req->comp = comp;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);

	return 0;
}

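/* IRQ handler */
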
static void ath10k_sdio_irq_handler(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;
	unsigned long timeout;
	bool done = false;
	int ret;

	/* Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
	do {
		ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
		if (ret)
			break;
	} while (time_before(jiffies, timeout) && !done);

	ath10k_mac_tx_push_pending(ar);

	sdio_claim_host(ar_sdio->func);

	if (ret && ret != -ECANCELED)
		ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
			    ret);
}

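/* sdio HIF functions */
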
static int ath10k_sdio_disable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs));
	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);

	mutex_unlock(&irq_data->mtx);

	return ret;
}

static int ath10k_sdio_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	if (!ar_sdio->is_disabled)
		return 0;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");

	ret = ath10k_sdio_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to config sdio: %d\n", ret);
		return ret;
	}

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/* Wait for hardware to initialise. It should take a lot less than
	 * 20 ms but let's be conservative here.
	 */
	msleep(20);

	ar_sdio->is_disabled = false;

	ret = ath10k_sdio_disable_intrs(ar);
	if (ret)
		return ret;

	return 0;
}

static void ath10k_sdio_hif_power_down(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");

	del_timer_sync(&ar_sdio->sleep_timer);
	ath10k_sdio_set_mbox_sleep(ar, true);

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);

	ret = sdio_disable_func(ar_sdio->func);
	if (ret) {
		ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return;
	}

	ret = mmc_hw_reset(ar_sdio->func->card->host);
	if (ret)
		ath10k_warn(ar, "unable to reset sdio: %d\n", ret);

	sdio_release_host(ar_sdio->func);

	ar_sdio->is_disabled = true;
}

static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int ret, i;

	eid = pipe_id_to_eid(pipe_id);

	for (i = 0; i < n_items; i++) {
		size_t padded_len;
		u32 address;

		skb = items[i].transfer_context;
		padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
							      skb->len);
		skb_trim(skb, padded_len);

		/* Write TX data to the end of the mbox address space */
		address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
			  skb->len;
		ret = ath10k_sdio_prep_async_req(ar, address, skb,
						 NULL, true, eid);
		if (ret)
			return ret;
	}

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	return 0;
}

static int ath10k_sdio_enable_intrs(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	int ret;

	mutex_lock(&irq_data->mtx);

	/* Enable all but CPU interrupts */
	regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
			      FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);

	/* NOTE: There are some cases where HIF can do detection of
	 * pending mbox messages which is disabled now.
	 */
	regs->int_status_en |=
		FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);

	/* Set up the CPU Interrupt Status Register, enable CPU sourced
	 * interrupt #0, which is used to report an assertion from the
	 * target.
	 */
	regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);

	/* Set up the Error Interrupt status Register */
	regs->err_int_status_en =
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
		FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);

	/* Enable Counter interrupt status register to get fatal errors for
	 * debugging.
	 */
	regs->cntr_int_status_en =
		FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
			   ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);

	ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
				&regs->int_status_en, sizeof(*regs));
	if (ret)
		ath10k_warn(ar,
			    "failed to update mbox interrupt status register: %d\n",
			    ret);

	mutex_unlock(&irq_data->mtx);
	return ret;
}

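/* HIF diagnostics */
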
static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				     size_t buf_len)
{
	int ret;
	void *mem;

	mem = kzalloc(buf_len, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/* set window register to start read cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
		goto out;
	}

	/* read the data */
	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
	if (ret) {
		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
			    ret);
		goto out;
	}

	memcpy(buf, mem, buf_len);

out:
	kfree(mem);

	return ret;
}

static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
				   u32 *value)
{
	__le32 *val;
	int ret;

	val = kzalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
	if (ret)
		goto out;

	*value = __le32_to_cpu(*val);

out:
	kfree(val);

	return ret;
}

static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
					  const void *data, int nbytes)
{
	int ret;

	/* set write data */
	ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
	if (ret) {
		ath10k_warn(ar,
			    "failed to write 0x%p to mbox window data address: %d\n",
			    data, ret);
		return ret;
	}

	/* set window register, which starts the write cycle */
	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
	if (ret) {
		ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
		return ret;
	}

	return 0;
}

static int ath10k_sdio_hif_start_post(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	u32 addr, val;
	int ret = 0;

	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));

	ret = ath10k_sdio_diag_read32(ar, addr, &val);
	if (ret) {
		ath10k_warn(ar, "unable to read hi_acs_flags: %d\n", ret);
		return ret;
	}

	if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service enabled\n");
		ar_sdio->swap_mbox = true;
	} else {
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio mailbox swap service disabled\n");
		ar_sdio->swap_mbox = false;
	}

	ath10k_sdio_set_mbox_sleep(ar, true);

	return 0;
}

static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
{
	u32 addr, val;
	int ret;

	addr = host_interest_item_address(HI_ITEM(hi_acs_flags));

	ret = ath10k_sdio_diag_read32(ar, addr, &val);
	if (ret) {
		ath10k_warn(ar,
			    "unable to read hi_acs_flags for htt tx complete: %d\n", ret);
		return ret;
	}

	ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);

	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
		   ret ? " " : " not ");

	return ret;
}

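/* HIF start/stop handlers */
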
static int ath10k_sdio_hif_start(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	int ret;

	napi_enable(&ar->napi);

	/* Sleep 20 ms before HIF interrupts are disabled.
	 * This will give target plenty of time to process the BMI done
	 * request before interrupts are disabled.
	 */
	msleep(20);
	ret = ath10k_sdio_disable_intrs(ar);
	if (ret)
		return ret;

	/* eid 0 always uses the lower part of the extended mailbox address
	 * space (ext_info[0].htc_ext_addr).
	 */
	ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
	ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
	if (ret) {
		ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
		sdio_release_host(ar_sdio->func);
		return ret;
	}

	sdio_release_host(ar_sdio->func);

	ret = ath10k_sdio_enable_intrs(ar);
	if (ret)
		ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);

	/* Enable sleep and then disable it again */
	ret = ath10k_sdio_set_mbox_sleep(ar, true);
	if (ret)
		return ret;

	/* Wait for 20ms for the written value to take effect */
	msleep(20);

	ret = ath10k_sdio_set_mbox_sleep(ar, false);
	if (ret)
		return ret;

	return 0;
}

#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)

static void ath10k_sdio_irq_disable(struct ath10k *ar)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
	struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
	struct sk_buff *skb;
	struct completion irqs_disabled_comp;
	int ret;

	skb = dev_alloc_skb(sizeof(*regs));
	if (!skb)
		return;

	mutex_lock(&irq_data->mtx);

	memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
	memcpy(skb->data, regs, sizeof(*regs));
	skb_put(skb, sizeof(*regs));

	mutex_unlock(&irq_data->mtx);

	init_completion(&irqs_disabled_comp);
	ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
					 skb, &irqs_disabled_comp, false, 0);
	if (ret)
		goto out;

	queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);

	/* Wait for the completion of the IRQ disable request.
	 * If there is a timeout we will try to disable irqs anyway.
	 */
	ret = wait_for_completion_timeout(&irqs_disabled_comp,
					  SDIO_IRQ_DISABLE_TIMEOUT_HZ);
	if (!ret)
		ath10k_warn(ar, "sdio irq disable request timed out\n");

	sdio_claim_host(ar_sdio->func);

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);

	sdio_release_host(ar_sdio->func);

out:
	kfree_skb(skb);
}

static void ath10k_sdio_hif_stop(struct ath10k *ar)
{
	struct ath10k_sdio_bus_request *req, *tmp_req;
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

	ath10k_sdio_irq_disable(ar);

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	/* Free all bus requests that have not been handled */
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		struct ath10k_htc_ep *ep;

		list_del(&req->list);

		if (req->htc_msg) {
			ep = &ar->htc.endpoint[req->eid];
			ath10k_htc_notify_tx_completion(ep, req->skb);
		} else if (req->skb) {
			kfree_skb(req->skb);
		}
		ath10k_sdio_free_bus_req(ar, req);
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
}

#ifdef CONFIG_PM

static int ath10k_sdio_hif_suspend(struct ath10k *ar)
{
	return 0;
}

static int ath10k_sdio_hif_resume(struct ath10k *ar)
{
	switch (ar->state) {
	case ATH10K_STATE_OFF:
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath10k_sdio_config(ar);
		break;

	case ATH10K_STATE_ON:
	default:
		break;
	}

	return 0;
}
#endif

static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
	struct ath10k_htc *htc = &ar->htc;
	u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
	enum ath10k_htc_ep_id eid;
	bool ep_found = false;
	int i;

	/* For sdio, we are interested in the mapping between eid
	 * and pipeid rather than service_id to pipe_id.
	 * First we find out which eid has been allocated to the
	 * service.
	 */
	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->endpoint[i].service_id == service_id) {
			eid = htc->endpoint[i].eid;
			ep_found = true;
			break;
		}
	}

	if (!ep_found)
		return -EINVAL;

	/* Then we create the simplest mapping possible between pipeid
	 * and eid
	 */
	*ul_pipe = *dl_pipe = (u8)eid;

	/* Normally, HTT will use the upper part of the extended
	 * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
	 * the lower part (ext_info[0].htc_ext_addr).
	 * If fw wants swapping of mailbox addresses, the opposite is true.
	 */
	if (ar_sdio->swap_mbox) {
		htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
	} else {
		htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
		wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
		htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
		wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
	}

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		/* HTC ctrl ep mbox address has already been setup in
		 * ath10k_sdio_hif_start
		 */
		break;
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		ar_sdio->mbox_addr[eid] = wmi_addr;
		ar_sdio->mbox_size[eid] = wmi_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		ar_sdio->mbox_addr[eid] = htt_addr;
		ar_sdio->mbox_size[eid] = htt_mbox_size;
		ath10k_dbg(ar, ATH10K_DBG_SDIO,
			   "sdio htt data mbox_addr 0x%x mbox_size %d\n",
			   ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
		break;
	default:
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    service_id);
		return -EINVAL;
	}

	return 0;
}

static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");

	/* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
	 * case) == 0
	 */
	*ul_pipe = 0;
	*dl_pipe = 0;
}

static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
	.tx_sg			= ath10k_sdio_hif_tx_sg,
	.diag_read		= ath10k_sdio_hif_diag_read,
	.diag_write		= ath10k_sdio_hif_diag_write_mem,
	.exchange_bmi_msg	= ath10k_sdio_bmi_exchange_msg,
	.start			= ath10k_sdio_hif_start,
	.stop			= ath10k_sdio_hif_stop,
	.start_post		= ath10k_sdio_hif_start_post,
	.get_htt_tx_complete	= ath10k_sdio_get_htt_tx_complete,
	.map_service_to_pipe	= ath10k_sdio_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_sdio_hif_get_default_pipe,
	.power_up		= ath10k_sdio_hif_power_up,
	.power_down		= ath10k_sdio_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_sdio_hif_suspend,
	.resume			= ath10k_sdio_hif_resume,
#endif
};

#ifdef CONFIG_PM_SLEEP

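/* Empty handlers so that the mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */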
static int ath10k_sdio_pm_suspend(struct device *device)
{
	struct sdio_func *func = dev_to_sdio_func(device);
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;
	mmc_pm_flag_t pm_flag, pm_caps;
	int ret;

	if (!device_may_wakeup(ar->dev))
		return 0;

	ath10k_sdio_set_mbox_sleep(ar, true);

	pm_flag = MMC_PM_KEEP_POWER;

	ret = sdio_set_host_pm_flags(func, pm_flag);
	if (ret) {
		pm_caps = sdio_get_host_pm_caps(func);
		ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
			    pm_flag, pm_caps, ret);
		return ret;
	}

	return ret;
}

static int ath10k_sdio_pm_resume(struct device *device)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
			 ath10k_sdio_pm_resume);

#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)

#else

#define ATH10K_SDIO_PM_OPS NULL

#endif

static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done;

	done = ath10k_htt_rx_hl_indication(ar, budget);
	ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);

	if (done < budget)
		napi_complete_done(ctx, done);

	return done;
}

static int ath10k_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	struct ath10k_sdio *ar_sdio;
	struct ath10k *ar;
	enum ath10k_hw_rev hw_rev;
	u32 dev_id_base;
	struct ath10k_bus_params bus_params = {};
	int ret, i;

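	/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
	 * If there will be newer chipsets that do not use the hw reg
	 * setup as defined in qca6174_regs and qca6174_values, this
	 * assumption is no longer valid and hw_rev must be setup
	 * differently depending on chipset.
	 */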
	hw_rev = ATH10K_HW_QCA6174;

	ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
				hw_rev, &ath10k_sdio_hif_ops);
	if (!ar) {
		dev_err(&func->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
		       ATH10K_NAPI_BUDGET);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = ath10k_sdio_priv(ar);

	ar_sdio->irq_data.irq_proc_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_proc_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->vsg_buffer) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->irq_data.irq_en_reg =
		devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
			     GFP_KERNEL);
	if (!ar_sdio->irq_data.irq_en_reg) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
	if (!ar_sdio->bmi_buf) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->is_disabled = true;
	ar_sdio->ar = ar;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->irq_data.mtx);

	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
	ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
	if (!ar_sdio->workqueue) {
		ret = -ENOMEM;
		goto err_core_destroy;
	}

	for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
		ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

	skb_queue_head_init(&ar_sdio->rx_head);
	INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);

	dev_id_base = (id->device & 0x0F00);
	if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
	    dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
		ret = -ENODEV;
		ath10k_err(ar, "unsupported device id %u (0x%x)\n",
			   dev_id_base, id->device);
		goto err_free_wq;
	}

	ar->dev_id = QCA9377_1_0_DEVICE_ID;
	ar->id.vendor = id->vendor;
	ar->id.device = id->device;

	ath10k_sdio_set_mbox_info(ar);

	bus_params.dev_type = ATH10K_DEV_TYPE_HL;
	/* TODO: don't know yet how to get chip_id with SDIO */
	bus_params.chip_id = 0;
	bus_params.hl_msdu_ids = true;

	ar->hw->max_mtu = ETH_DATA_LEN;

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_wq;
	}

	timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);

	return 0;

err_free_wq:
	destroy_workqueue(ar_sdio->workqueue);
err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static void ath10k_sdio_remove(struct sdio_func *func)
{
	struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
	struct ath10k *ar = ar_sdio->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ath10k_core_unregister(ar);

	netif_napi_del(&ar->napi);

	ath10k_core_destroy(ar);

	flush_workqueue(ar_sdio->workqueue);
	destroy_workqueue(ar_sdio->workqueue);
}

static const struct sdio_device_id ath10k_sdio_devices[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);

static struct sdio_driver ath10k_sdio_driver = {
	.name = "ath10k_sdio",
	.id_table = ath10k_sdio_devices,
	.probe = ath10k_sdio_probe,
	.remove = ath10k_sdio_remove,
	.drv = {
		.owner = THIS_MODULE,
		.pm = ATH10K_SDIO_PM_OPS,
	},
};

static int __init ath10k_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath10k_sdio_driver);
	if (ret)
		pr_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath10k_sdio_exit(void)
{
	sdio_unregister_driver(&ath10k_sdio_driver);
}

module_init(ath10k_sdio_init);
module_exit(ath10k_sdio_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");