/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"
#include "trace.h"

struct ath6kl_sdio {
	struct sdio_func *func;

	/* protects access to bus_req_freeq */
	spinlock_t lock;

	/* free list of bus requests */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;

	u8 *dma_buffer;

	/* protects access to dma_buffer */
	struct mutex dma_buffer_mutex;

	/* scatter request list head */
	struct list_head scat_req;

	atomic_t irq_handling;
	wait_queue_head_t irq_wq;

	/* protects access to scat_req */
	spinlock_t scat_lock;

	bool scatter_enabled;

	bool is_disabled;
	const struct sdio_device_id *id;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;

	/* protects access to wr_asyncq */
	spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static int ath6kl_sdio_config(struct ath6kl *ar);

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}

/*
 * Check if a buffer is word-aligned and sits in the kernel's linear
 * mapping. Buffers that fail either test (e.g. stack or vmalloc'ed
 * memory) cannot be handed to the SDIO layer for DMA reliably, so the
 * transfer is staged through the pre-allocated bounce buffer instead.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

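/*
 * Build the argument word for CMD53 (IO_RW_EXTENDED). The shifts below
 * follow the SDIO spec field layout: bit 31 is the R/W flag, bits 30:28
 * the function number, bit 27 selects block (vs byte) mode, bit 26
 * selects incrementing (vs fixed) addressing, bits 25:9 carry the
 * register address and bits 8:0 the block/byte count.
 */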
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((rw & 1) << 31) |
		((func & 0x7) << 28) |
		((mode & 1) << 27) |
		((opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}

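/*
 * Likewise for CMD52 (IO_RW_DIRECT): bit 31 is the write flag, bits
 * 30:28 the function number (always 0 here), bit 27 the read-after-write
 * flag, bits 25:9 the register address and bits 7:0 the data byte. Bits
 * 26 and 8, which this code sets to 1, fall in positions the SDIO spec
 * leaves as stuff bits.
 */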
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((write & 1) << 31) |
	       ((func & 0x7) << 28) |
	       ((raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}

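/*
 * Function-0 registers such as CCCR_SDIO_IRQ_MODE_REG live in the card's
 * common register space rather than in this driver's claimed function,
 * so the CMD52 is issued directly through mmc_wait_for_cmd() instead of
 * the per-function sdio_writeb()-style accessors.
 */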
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	sdio_claim_host(func);

	if (request & HIF_WRITE) {
		/*
		 * Shift mailbox writes so that the last byte of the
		 * transfer lands on the mailbox end-of-message (EOM)
		 * address, which tells the target the message is
		 * complete.
		 */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* likewise for the extended mailbox window */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	sdio_release_host(func);

	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	trace_ath6kl_sdio(addr, request, buf, len);

	return ret;
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
	struct bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_bh(&ar_sdio->lock);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_bh(&ar_sdio->lock);
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}

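/*
 * Map a HIF scatter request onto the struct mmc_data used for a single
 * CMD53. The transfer is done on a block basis, so scat_req->len is
 * expected to be a multiple of HIF_MBOX_BLOCK_SIZE.
 */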
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot: %d, sg: %d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}

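/*
 * Issue one scatter request as a single CMD53. Virtual-scatter requests
 * have already been linearized into virt_dma_buf and go through the
 * normal byte/block I/O path; real scatter requests are handed to the
 * MMC layer as an SG list via mmc_wait_for_req().
 */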
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status, len;

	scat_req = req->scat_req;

	if (scat_req->virt_scat) {
		len = scat_req->len;
		if (scat_req->req & HIF_BLOCK_BASIS)
			len = round_down(len, HIF_MBOX_BLOCK_SIZE);

		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
		goto scat_complete;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		 CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	sdio_claim_host(ar_sdio->func);

	mmc_set_data_timeout(&data, ar_sdio->func->card);

	trace_ath6kl_sdio_scat(scat_req->addr,
			       scat_req->req,
			       scat_req->len,
			       scat_req->scat_entries,
			       scat_req->scat_list);

	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	sdio_release_host(ar_sdio->func);

	status = cmd.error ? cmd.error : data.error;

scat_complete:
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter request failed: %d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}

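/*
 * Pre-allocate a pool of scatter requests. With virtual scatter each
 * request gets an L1-cache-aligned bounce buffer; otherwise it gets an
 * sg table sized for n_scat_entry entries. Each scatter request is
 * paired with a bus request and added to the free pool.
 */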
static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
					   int n_scat_entry, int n_scat_req,
					   bool virt_scat)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz, size;
	u8 *virt_buf;

	scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	if (!virt_scat)
		size = sizeof(struct scatterlist) * n_scat_entry;
	else
		size = 2 * L1_CACHE_BYTES +
		       ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

	for (i = 0; i < n_scat_req; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			return -ENOMEM;

		if (virt_scat) {
			virt_buf = kzalloc(size, GFP_KERNEL);
			if (!virt_buf) {
				kfree(s_req);
				return -ENOMEM;
			}

			s_req->virt_dma_buf =
				(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
		} else {
			/* allocate sglist */
			s_req->sgentries = kzalloc(size, GFP_KERNEL);

			if (!s_req->sgentries) {
				kfree(s_req);
				return -ENOMEM;
			}
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->sgentries);
			kfree(s_req->virt_dma_buf);
			kfree(s_req);
			return -ENOMEM;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->busrequest = bus_req;

		s_req->virt_scat = virt_scat;

		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	return 0;
}

static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		mutex_lock(&ar_sdio->dma_buffer_mutex);
		tbuf = ar_sdio->dma_buffer;

		if (request & HIF_WRITE)
			memcpy(tbuf, buf, len);

		bounced = true;
	} else {
		tbuf = buf;
	}

	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);

	if (bounced)
		mutex_unlock(&ar_sdio->dma_buffer_mutex);

	return ret;
}

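/*
 * Execute one queued bus request from the write-async worker: scatter
 * requests are dispatched to ath6kl_sdio_scat_rw(), plain requests are
 * performed synchronously here and then completed through the HIF
 * read/write completion handler.
 */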
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req) {
		ath6kl_sdio_scat_rw(ar_sdio, req);
	} else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kl_hif_rw_comp_handler(context, status);
	}
}

static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}
	spin_unlock_bh(&ar_sdio->wr_async_lock);
}

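/*
 * Called from the MMC core's SDIO IRQ thread with the host claimed,
 * which is why the handler can (and must) release and re-claim the host
 * around the potentially long-running bottom-half processing.
 */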
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);
	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);

	atomic_set(&ar_sdio->irq_handling, 0);
	wake_up(&ar_sdio->irq_wq);

	WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
		goto out;
	}

	ar_sdio->is_disabled = false;

out:
	return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}

static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);

	if (WARN_ON_ONCE(!bus_req))
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

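/*
 * Despite the name, this returns true once no interrupt is being
 * handled; it is the wait condition used by ath6kl_sdio_irq_disable()
 * below.
 */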
static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	return !atomic_read(&ar_sdio->irq_handling);
}

static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	if (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);

		ret = wait_event_interruptible(ar_sdio->irq_wq,
					       ath6kl_sdio_is_on_irq(ar));
		if (ret)
			return;

		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *node = NULL;

	spin_lock_bh(&ar_sdio->scat_lock);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);

		node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
	}

	spin_unlock_bh(&ar_sdio->scat_lock);

	return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	spin_lock_bh(&ar_sdio->scat_lock);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_bh(&ar_sdio->scat_lock);
}

/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
	} else {
		spin_lock_bh(&ar_sdio->wr_async_lock);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;

	/* empty the free list */
	spin_lock_bh(&ar_sdio->scat_lock);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		/*
		 * FIXME: should we also call the completion handler with
		 * ath6kl_hif_rw_comp_handler() and status -ECANCELED so
		 * that the packet is properly freed?
		 */
		if (s_req->busrequest) {
			s_req->busrequest->scat_req = NULL;
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		}
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_bh(&ar_sdio->scat_lock);
	}
	spin_unlock_bh(&ar_sdio->scat_lock);

	ar_sdio->scatter_enabled = false;
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret = 0;
	bool virt_scat = false;

	if (ar_sdio->scatter_enabled)
		return 0;

	ar_sdio->scatter_enabled = true;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of %d entries, need %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
						      MAX_SCATTER_ENTRIES_PER_REQ,
						      MAX_SCATTER_REQUESTS,
						      virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "hif-scatter enabled requests %d entries %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
				MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
						      ATH6KL_SCATTER_ENTRIES_PER_REQ,
						      ATH6KL_SCATTER_REQS,
						      virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources!\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "virtual scatter enabled requests %d entries %d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
			ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}

static int ath6kl_sdio_config(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						      CCCR_SDIO_IRQ_MODE_REG,
						      SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode: %d\n",
				   ret);
			goto out;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		goto out;
	}

out:
	sdio_release_host(func);

	return ret;
}

static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	int ret;

	flags = sdio_get_host_pm_caps(func);

	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

	if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
	    !(flags & MMC_PM_KEEP_POWER))
		return -EINVAL;

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret) {
		ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
		return ret;
	}

	/* sdio irq wakes up host */
	ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
	if (ret)
		ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

	return ret;
}

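/*
 * Suspend policy: try WoW first when requested (the host must be able
 * to keep the card powered and wake on SDIO irq), fall back to deep
 * sleep when the host can at least keep power, and cut power to the
 * card as the last resort.
 */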
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	bool try_deepsleep = false;
	int ret;

	if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
	    (!ar->suspend_mode && wow)) {
		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
		if (ret && ret != -ENOTCONN)
			ath6kl_err("wow suspend failed: %d\n", ret);

		if (ret &&
		    (!ar->wow_suspend_mode ||
		     ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
			try_deepsleep = true;
		else if (ret &&
			 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
			goto cut_pwr;
		if (!ret)
			return 0;
	}

	if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
	    !ar->suspend_mode || try_deepsleep) {
		flags = sdio_get_host_pm_caps(func);
		if (!(flags & MMC_PM_KEEP_POWER))
			goto cut_pwr;

		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
		if (ret)
			goto cut_pwr;

		/*
		 * Workaround to support Deep Sleep with MSM: set the host pm
		 * flag MMC_PM_WAKE_SDIO_IRQ so the SDCC driver can disable
		 * sdc2_clock and internally allow MSM to enter TCXO shutdown
		 * properly.
		 */
		if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
			ret = sdio_set_host_pm_flags(func,
						     MMC_PM_WAKE_SDIO_IRQ);
			if (ret)
				goto cut_pwr;
		}

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

cut_pwr:
	if (func->card && func->card->host)
		func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;

	return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}

static int ath6kl_sdio_resume(struct ath6kl *ar)
{
	switch (ar->state) {
	case ATH6KL_STATE_OFF:
	case ATH6KL_STATE_CUTPOWER:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath6kl_sdio_config(ar);
		break;

	case ATH6KL_STATE_ON:
		break;

	case ATH6KL_STATE_DEEPSLEEP:
		break;

	case ATH6KL_STATE_WOW:
		break;

	case ATH6KL_STATE_SUSPENDING:
		break;

	case ATH6KL_STATE_RESUMING:
		break;

	case ATH6KL_STATE_RECOVERY:
		break;
	}

	ath6kl_cfg80211_resume(ar);

	return 0;
}

/* set the window address register (using 4-byte register access) */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
	int status;
	u8 addr_val[4];
	s32 i;

	/*
	 * Write bytes 1,2,3 of the register to set the upper address bytes;
	 * the LSB is written last to initiate the access cycle.
	 */
	for (i = 1; i <= 3; i++) {
		/*
		 * Fill the buffer with the address byte value we want to
		 * hit 4 times.
		 */
		memset(addr_val, ((u8 *)&addr)[i], 4);

		/*
		 * Hit each byte of the register address with a 4-byte
		 * write operation to the same address, this is a harmless
		 * operation.
		 */
		status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
						     4, HIF_WR_SYNC_BYTE_FIX);
		if (status)
			break;
	}

	if (status) {
		ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	/*
	 * Write the address register again, this time writing the whole
	 * 4-byte value. The LSB write causes the access cycle to start,
	 * and the extra 3-byte write to bytes 1,2,3 has no effect since
	 * we are writing the same values again.
	 */
	status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
					     4, HIF_WR_SYNC_BYTE_INC);

	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	return 0;
}

static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
	int status;

	/* set window register to start read cycle */
	status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
					address);
	if (status)
		return status;

	/* read the data */
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
					     (u8 *)data, sizeof(u32),
					     HIF_RD_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to read from window data addr\n",
			   __func__);
		return status;
	}

	return status;
}

static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
				    __le32 data)
{
	int status;
	u32 val = (__force u32) data;

	/* set write data */
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
					     (u8 *) &val, sizeof(u32),
					     HIF_WR_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window data addr\n",
			   __func__, data);
		return status;
	}

	/* set window register, which starts the write cycle */
	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
				      address);
}

static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
	u32 addr;
	unsigned long timeout;
	int ret;

	ar->bmi.cmd_credits = 0;

	/* Read the counter register to get the command credits */
	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
		/*
		 * Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes have no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = ath6kl_sdio_read_write_sync(ar, addr,
						  (u8 *)&ar->bmi.cmd_credits, 4,
						  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit count register: %d\n",
				   ret);
			return ret;
		}

		/*
		 * The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes.
		 */
		ar->bmi.cmd_credits &= 0xFF;
	}

	if (!ar->bmi.cmd_credits) {
		ath6kl_err("bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

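/*
 * Poll the RX lookahead valid register until the target indicates that
 * at least the first 4 bytes of the BMI response are available in the
 * mailbox FIFO for ENDPOINT1.
 */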
static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
	unsigned long timeout;
	u32 rx_word = 0;
	int ret = 0;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath6kl_sdio_read_write_sync(ar,
						  RX_LOOKAHEAD_VALID_ADDRESS,
						  (u8 *)&rx_word,
						  sizeof(rx_word),
						  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= (1 << ENDPOINT1);
	}

	if (!rx_word) {
		ath6kl_err("bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	ret = ath6kl_sdio_bmi_credits(ar);
	if (ret)
		return ret;

	addr = ar->mbox_info.htc_addr;

	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
					  HIF_WR_SYNC_BYTE_INC);
	if (ret) {
		ath6kl_err("unable to send the bmi data to the device\n");
		return ret;
	}

	return 0;
}

static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	/*
	 * During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers. And on an interconnect such as
	 * Compact Flash (as well as some SDIO masters) which does not
	 * provide any indication on data timeout, it avoids a potential
	 * hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data. So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read. Fortunately, large BMI reads do
	 * not occur in practice -- they are supported for debug and
	 * development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 *
	 *  CASE 1: length < 4
	 *	Should not happen.
	 *
	 *  CASE 2: 4 <= length <= 128
	 *	Wait for first 4 bytes to be in FIFO.
	 *	If CONSERVATIVE_BMI_READ is enabled, also wait for a BMI
	 *	command credit, which indicates that the ENTIRE response
	 *	is available in the FIFO.
	 *
	 *  CASE 3: length > 128
	 *	Wait for the first 4 bytes to be in FIFO.
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to
	 * sleep and yield while waiting for BMI_EXECUTE.
	 */
	if (len >= 4) {
		ret = ath6kl_bmi_get_rx_lkahd(ar);
		if (ret)
			return ret;
	}

	addr = ar->mbox_info.htc_addr;
	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
					  HIF_RD_SYNC_BYTE_INC);
	if (ret) {
		ath6kl_err("Unable to read the bmi data from the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

static void ath6kl_sdio_stop(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *req, *tmp_req;
	void *context;

	/* stop the async writer, then fail anything still queued */
	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);

		if (req->scat_req) {
			/* this is a scatter gather request */
			req->scat_req->status = -ECANCELED;
			req->scat_req->complete(ar_sdio->ar->htc_target,
						req->scat_req);
		} else {
			context = req->packet;
			ath6kl_sdio_free_bus_req(ar_sdio, req);
			ath6kl_hif_rw_comp_handler(context, -ECANCELED);
		}
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	/* all scatter requests should be back in the free pool by now */
	WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
	.suspend = ath6kl_sdio_suspend,
	.resume = ath6kl_sdio_resume,
	.diag_read32 = ath6kl_sdio_diag_read32,
	.diag_write32 = ath6kl_sdio_diag_write32,
	.bmi_read = ath6kl_sdio_bmi_read,
	.bmi_write = ath6kl_sdio_bmi_write,
	.power_on = ath6kl_sdio_power_on,
	.power_off = ath6kl_sdio_power_off,
	.stop = ath6kl_sdio_stop,
};

#ifdef CONFIG_PM_SLEEP

/*
 * The real suspend/resume work is driven through the HIF ops above;
 * these empty dev_pm_ops handlers exist so that the MMC core sees a
 * suspend-capable driver and does not power the card down (or remove
 * it) across system suspend.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

	return 0;
}

static int ath6kl_sdio_pm_resume(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

	return 0;
}

static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
			 ath6kl_sdio_pm_resume);

#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)

#else

#define ATH6KL_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->dma_buffer_mutex);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	init_waitqueue_head(&ar_sdio->irq_wq);

	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_create(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;
	ar->bmi.max_data_size = 256;

	ath6kl_sdio_set_mbox_info(ar);

	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
		goto err_core_alloc;
	}

	ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_core_alloc;
	}

	return ret;

err_core_alloc:
	ath6kl_core_destroy(ar_sdio->ar);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_core_cleanup(ar_sdio->ar);
	ath6kl_core_destroy(ar_sdio->ar);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x19))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
	.drv.pm = ATH6KL_SDIO_PM_OPS,
};

static int __init ath6kl_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath6kl_sdio_driver);
	if (ret)
		ath6kl_err("sdio driver registration failed: %d\n", ret);

	return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);