1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/module.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/mmc.h>
21#include <linux/mmc/host.h>
22#include <linux/mmc/sdio_func.h>
23#include <linux/mmc/sdio_ids.h>
24#include <linux/mmc/sdio.h>
25#include <linux/mmc/sd.h>
26#include "hif.h"
27#include "hif-ops.h"
28#include "target.h"
29#include "debug.h"
30#include "cfg80211.h"
31#include "trace.h"
32
/* Per-device state for the ath6kl SDIO HIF implementation. */
struct ath6kl_sdio {
	struct sdio_func *func;

	/* protects bus_req_freeq */
	spinlock_t lock;

	/* free list of bus requests, drawn from the bus_req pool below */
	struct list_head bus_req_freeq;

	/* fixed pool backing bus_req_freeq */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;

	/* bounce buffer for unaligned / non-DMA-able caller buffers */
	u8 *dma_buffer;

	/* serializes use of dma_buffer */
	struct mutex dma_buffer_mutex;

	/* pool of free scatter requests */
	struct list_head scat_req;

	/* non-zero while the SDIO irq bottom half is running */
	atomic_t irq_handling;
	wait_queue_head_t irq_wq;

	/* protects scat_req list */
	spinlock_t scat_lock;

	bool scatter_enabled;

	bool is_disabled;
	const struct sdio_device_id *id;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;

	/* protects wr_asyncq */
	spinlock_t wr_async_lock;
};
71
/* Field values for the CMD53 argument built in ath6kl_sdio_set_cmd53_arg() */
#define CMD53_ARG_READ 0
#define CMD53_ARG_WRITE 1
#define CMD53_ARG_BLOCK_BASIS 1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS 1

/* forward declaration: needed by ath6kl_sdio_power_on() and _resume() */
static int ath6kl_sdio_config(struct ath6kl *ar);
79
/* Return the SDIO HIF private data attached to the core device. */
static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}
84
85
86
87
88
89
90
91static inline bool buf_needs_bounce(u8 *buf)
92{
93 return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
94}
95
/* Fill in the mailbox addresses and sizes the HTC layer uses for this HIF. */
static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}
108
/*
 * Encode the 32-bit argument of an SDIO CMD53 (IO_RW_EXTENDED):
 * read/write flag, function number, block/byte mode, fixed vs.
 * incrementing address opcode, 17-bit register address and the
 * 9-bit block/byte count.
 */
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	u32 cmd = 0;

	cmd |= (rw & 1) << 31;
	cmd |= (func & 0x7) << 28;
	cmd |= (mode & 1) << 27;
	cmd |= (opcode & 1) << 26;
	cmd |= (addr & 0x1FFFF) << 9;
	cmd |= blksz & 0x1FF;

	*arg = cmd;
}
120
/*
 * Encode the 32-bit argument of an SDIO CMD52 (IO_RW_DIRECT) aimed at
 * function 0: write flag, read-after-write flag, 17-bit register
 * address and the data byte.  Bits 26 and 8 are fixed as in the
 * original encoding.
 */
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;
	u32 cmd;

	cmd = (write & 1) << 31;
	cmd |= (func & 0x7) << 28;
	cmd |= (raw & 1) << 27;
	cmd |= 1 << 26;
	cmd |= (address & 0x1FFFF) << 9;
	cmd |= 1 << 8;
	cmd |= val & 0xFF;

	*arg = cmd;
}
135
136static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
137 unsigned int address,
138 unsigned char byte)
139{
140 struct mmc_command io_cmd;
141
142 memset(&io_cmd, 0, sizeof(io_cmd));
143 ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
144 io_cmd.opcode = SD_IO_RW_DIRECT;
145 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
146
147 return mmc_wait_for_cmd(card->host, &io_cmd, 0);
148}
149
/*
 * Perform a synchronous SDIO transfer.  Writes into the mailbox
 * address ranges are offset so that the transfer ends exactly at the
 * top of the mailbox window (addr += width - len).
 */
static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	sdio_claim_host(func);

	if (request & HIF_WRITE) {
		/* position the write so its last byte lands at the end
		 * of the mailbox window */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* same adjustment for the extended mailbox window */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	sdio_release_host(func);

	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	trace_ath6kl_sdio(addr, request, buf, len);

	return ret;
}
189
190static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
191{
192 struct bus_request *bus_req;
193
194 spin_lock_bh(&ar_sdio->lock);
195
196 if (list_empty(&ar_sdio->bus_req_freeq)) {
197 spin_unlock_bh(&ar_sdio->lock);
198 return NULL;
199 }
200
201 bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
202 struct bus_request, list);
203 list_del(&bus_req->list);
204
205 spin_unlock_bh(&ar_sdio->lock);
206 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
207 __func__, bus_req);
208
209 return bus_req;
210}
211
/* Return a bus request to the free list. */
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}
222
/*
 * Translate a HIF scatter request into an mmc_data descriptor:
 * block geometry, direction flag and the scatterlist built from the
 * request's entry list.
 */
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set the scatter-gather table for the request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}
259
260static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
261 struct bus_request *req)
262{
263 struct mmc_request mmc_req;
264 struct mmc_command cmd;
265 struct mmc_data data;
266 struct hif_scatter_req *scat_req;
267 u8 opcode, rw;
268 int status, len;
269
270 scat_req = req->scat_req;
271
272 if (scat_req->virt_scat) {
273 len = scat_req->len;
274 if (scat_req->req & HIF_BLOCK_BASIS)
275 len = round_down(len, HIF_MBOX_BLOCK_SIZE);
276
277 status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
278 scat_req->addr, scat_req->virt_dma_buf,
279 len);
280 goto scat_complete;
281 }
282
283 memset(&mmc_req, 0, sizeof(struct mmc_request));
284 memset(&cmd, 0, sizeof(struct mmc_command));
285 memset(&data, 0, sizeof(struct mmc_data));
286
287 ath6kl_sdio_setup_scat_data(scat_req, &data);
288
289 opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
290 CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;
291
292 rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
293
294
295 if (scat_req->req & HIF_WRITE) {
296 if (scat_req->addr == HIF_MBOX_BASE_ADDR)
297 scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
298 else
299
300 scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
301 }
302
303
304 ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
305 CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
306 data.blocks);
307
308 cmd.opcode = SD_IO_RW_EXTENDED;
309 cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
310
311 mmc_req.cmd = &cmd;
312 mmc_req.data = &data;
313
314 sdio_claim_host(ar_sdio->func);
315
316 mmc_set_data_timeout(&data, ar_sdio->func->card);
317
318 trace_ath6kl_sdio_scat(scat_req->addr,
319 scat_req->req,
320 scat_req->len,
321 scat_req->scat_entries,
322 scat_req->scat_list);
323
324
325 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
326
327 sdio_release_host(ar_sdio->func);
328
329 status = cmd.error ? cmd.error : data.error;
330
331scat_complete:
332 scat_req->status = status;
333
334 if (scat_req->status)
335 ath6kl_err("Scatter write request failed:%d\n",
336 scat_req->status);
337
338 if (scat_req->req & HIF_ASYNCHRONOUS)
339 scat_req->complete(ar_sdio->ar->htc_target, scat_req);
340
341 return status;
342}
343
344static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
345 int n_scat_entry, int n_scat_req,
346 bool virt_scat)
347{
348 struct hif_scatter_req *s_req;
349 struct bus_request *bus_req;
350 int i, scat_req_sz, scat_list_sz, size;
351 u8 *virt_buf;
352
353 scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
354 scat_req_sz = sizeof(*s_req) + scat_list_sz;
355
356 if (!virt_scat)
357 size = sizeof(struct scatterlist) * n_scat_entry;
358 else
359 size = 2 * L1_CACHE_BYTES +
360 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
361
362 for (i = 0; i < n_scat_req; i++) {
363
364 s_req = kzalloc(scat_req_sz, GFP_KERNEL);
365 if (!s_req)
366 return -ENOMEM;
367
368 if (virt_scat) {
369 virt_buf = kzalloc(size, GFP_KERNEL);
370 if (!virt_buf) {
371 kfree(s_req);
372 return -ENOMEM;
373 }
374
375 s_req->virt_dma_buf =
376 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
377 } else {
378
379 s_req->sgentries = kzalloc(size, GFP_KERNEL);
380
381 if (!s_req->sgentries) {
382 kfree(s_req);
383 return -ENOMEM;
384 }
385 }
386
387
388 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
389 if (!bus_req) {
390 kfree(s_req->sgentries);
391 kfree(s_req->virt_dma_buf);
392 kfree(s_req);
393 return -ENOMEM;
394 }
395
396
397 bus_req->scat_req = s_req;
398 s_req->busrequest = bus_req;
399
400 s_req->virt_scat = virt_scat;
401
402
403 hif_scatter_req_add(ar_sdio->ar, s_req);
404 }
405
406 return 0;
407}
408
/*
 * Synchronous read/write entry point.  Buffers that are unaligned or
 * not DMA-able are staged through the single pre-allocated bounce
 * buffer, serialized by dma_buffer_mutex.
 * NOTE(review): the bounce path assumes len <= HIF_DMA_BUFFER_SIZE —
 * confirm against callers.
 */
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		mutex_lock(&ar_sdio->dma_buffer_mutex);
		tbuf = ar_sdio->dma_buffer;

		if (request & HIF_WRITE)
			memcpy(tbuf, buf, len);

		bounced = true;
	} else {
		tbuf = buf;
	}

	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	/* copy the received data back out of the bounce buffer */
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);

	if (bounced)
		mutex_unlock(&ar_sdio->dma_buffer_mutex);

	return ret;
}
443
444static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
445 struct bus_request *req)
446{
447 if (req->scat_req) {
448 ath6kl_sdio_scat_rw(ar_sdio, req);
449 } else {
450 void *context;
451 int status;
452
453 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
454 req->buffer, req->length,
455 req->request);
456 context = req->packet;
457 ath6kl_sdio_free_bus_req(ar_sdio, req);
458 ath6kl_hif_rw_comp_handler(context, status);
459 }
460}
461
/*
 * Work handler: drain the async write queue, dropping the lock while
 * each request is processed.
 * NOTE(review): tmp_req is captured before the lock is dropped;
 * relies on nothing else removing entries concurrently — confirm.
 */
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}
	spin_unlock_bh(&ar_sdio->wr_async_lock);
}
478
/*
 * SDIO interrupt callback.  irq_handling is raised while the bottom
 * half runs so ath6kl_sdio_irq_disable() can wait for it to finish.
 */
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);
	/*
	 * Release the host during the interrupt so the bottom-half
	 * handler can claim it for its own transfers; re-claim it
	 * before returning to the MMC core.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);

	atomic_set(&ar_sdio->irq_handling, 0);
	wake_up(&ar_sdio->irq_wq);

	WARN_ON(status && status != -ECANCELED);
}
502
/*
 * Enable the SDIO function and (re)configure the card.  No-op when
 * the device is already enabled.
 */
static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d)\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Give the hardware time to initialise after enabling the
	 * function before talking to it (10 ms is presumably a
	 * conservative figure — confirm against hardware docs).
	 */
	msleep(10);

	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
		goto out;
	}

	ar_sdio->is_disabled = false;

out:
	return ret;
}
542
543static int ath6kl_sdio_power_off(struct ath6kl *ar)
544{
545 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
546 int ret;
547
548 if (ar_sdio->is_disabled)
549 return 0;
550
551 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
552
553
554 sdio_claim_host(ar_sdio->func);
555 ret = sdio_disable_func(ar_sdio->func);
556 sdio_release_host(ar_sdio->func);
557
558 if (ret)
559 return ret;
560
561 ar_sdio->is_disabled = true;
562
563 return ret;
564}
565
/*
 * Queue an asynchronous read/write.  The transfer is performed later
 * by wr_async_work, which reports completion through the HIF rw
 * completion handler with 'packet' as the context.
 */
static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);

	if (WARN_ON_ONCE(!bus_req))
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}
591
592static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
593{
594 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
595 int ret;
596
597 sdio_claim_host(ar_sdio->func);
598
599
600 ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
601 if (ret)
602 ath6kl_err("Failed to claim sdio irq: %d\n", ret);
603
604 sdio_release_host(ar_sdio->func);
605}
606
607static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
608{
609 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
610
611 return !atomic_read(&ar_sdio->irq_handling);
612}
613
/*
 * Release the SDIO IRQ.  If the interrupt handler is currently
 * running, drop the host (the handler re-claims it) and wait for it
 * to finish before releasing the irq.
 */
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	if (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);

		ret = wait_event_interruptible(ar_sdio->irq_wq,
					       ath6kl_sdio_is_on_irq(ar));
		/* interrupted: bail out without releasing the irq */
		if (ret)
			return;

		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}
638
639static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
640{
641 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
642 struct hif_scatter_req *node = NULL;
643
644 spin_lock_bh(&ar_sdio->scat_lock);
645
646 if (!list_empty(&ar_sdio->scat_req)) {
647 node = list_first_entry(&ar_sdio->scat_req,
648 struct hif_scatter_req, list);
649 list_del(&node->list);
650
651 node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
652 }
653
654 spin_unlock_bh(&ar_sdio->scat_lock);
655
656 return node;
657}
658
/* Return a scatter request to the free pool. */
static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	spin_lock_bh(&ar_sdio->scat_lock);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_bh(&ar_sdio->scat_lock);
}
670
671
/*
 * Scatter-gather read/write entry point: run synchronously for
 * HIF_SYNCHRONOUS requests, otherwise queue on the async work queue.
 */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
	} else {
		/* queue it; wr_async_work issues the transfer later */
		spin_lock_bh(&ar_sdio->wr_async_lock);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}
697
698
/*
 * Free every scatter request in the pool (returning their bus
 * requests to the free list) and mark scatter support disabled.
 */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;

	/* empty the free list; the lock is dropped while freeing */
	spin_lock_bh(&ar_sdio->scat_lock);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		if (s_req->busrequest) {
			s_req->busrequest->scat_req = NULL;
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		}
		/*
		 * NOTE(review): virt_dma_buf holds the
		 * L1_CACHE_ALIGN()ed address, which may differ from the
		 * pointer kzalloc() returned in
		 * ath6kl_sdio_alloc_prep_scat_req(); kfree() of it is
		 * only safe if kmalloc alignment >= L1_CACHE_BYTES —
		 * confirm.
		 */
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_bh(&ar_sdio->scat_lock);
	}
	spin_unlock_bh(&ar_sdio->scat_lock);

	ar_sdio->scatter_enabled = false;
}
729
730
/*
 * Set up scatter-gather support: native host scatterlists when the
 * controller supports enough segments, otherwise (or on allocation
 * failure) fall back to the bounce-buffer "virtual scatter" method.
 */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret = 0;
	bool virt_scat = false;

	if (ar_sdio->scatter_enabled)
		return 0;

	ar_sdio->scatter_enabled = true;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				MAX_SCATTER_ENTRIES_PER_REQ,
				MAX_SCATTER_REQUESTS, virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "hif-scatter enabled requests %d entries %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
						MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	/* virtual scatter fallback */
	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				ATH6KL_SCATTER_ENTRIES_PER_REQ,
				ATH6KL_SCATTER_REQS, virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources !\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "virtual scatter enabled requests %d entries %d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
					ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}
793
/*
 * Configure the SDIO function: enable 4-bit async interrupt mode on
 * AR6003 and later, shorten the enable timeout and set the mailbox
 * block size.
 */
static int ath6kl_sdio_config(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	if (ar_sdio->id->device >= SDIO_DEVICE_ID_ATHEROS_AR6003_00) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						      CCCR_SDIO_IRQ_MODE_REG,
						      SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			goto out;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d)\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		goto out;
	}

out:
	sdio_release_host(func);

	return ret;
}
831
/*
 * Request keep-power and wake-on-SDIO-irq from the MMC host (needed
 * for WoW suspend); -EINVAL if the host cannot provide both.
 */
static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	int ret;

	flags = sdio_get_host_pm_caps(func);

	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

	if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
	    !(flags & MMC_PM_KEEP_POWER))
		return -EINVAL;

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret) {
		ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
		return ret;
	}

	/* sdio irq wakes up host */
	ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
	if (ret)
		ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

	return ret;
}
860
/*
 * Suspend entry: try WoW suspend first (when configured or when a
 * wowlan trigger is supplied), then deep sleep, and finally fall
 * back to cutting power entirely.
 */
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	bool try_deepsleep = false;
	int ret;

	if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
	    (!ar->suspend_mode && wow)) {
		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
		if (ret && ret != -ENOTCONN)
			ath6kl_err("wow suspend failed: %d\n", ret);

		/* WoW failed: pick deep sleep or cut power per config */
		if (ret &&
		    (!ar->wow_suspend_mode ||
		     ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
			try_deepsleep = true;
		else if (ret &&
			 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
			goto cut_pwr;
		if (!ret)
			return 0;
	}

	if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
	    !ar->suspend_mode || try_deepsleep) {
		flags = sdio_get_host_pm_caps(func);
		if (!(flags & MMC_PM_KEEP_POWER))
			goto cut_pwr;

		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
		if (ret)
			goto cut_pwr;

		/*
		 * Request wake on SDIO irq when the host advertises it;
		 * a host without the capability is tolerated here, but
		 * a failure to set an advertised flag aborts to the
		 * cut-power path.
		 */
		if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
			ret = sdio_set_host_pm_flags(func,
						     MMC_PM_WAKE_SDIO_IRQ);
			if (ret)
				goto cut_pwr;
		}

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

cut_pwr:
	if (func->card && func->card->host)
		func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;

	return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}
927
/*
 * Resume entry: reconfigure the SDIO interface when coming back from
 * a powered-off state, then let cfg80211 restore the WLAN state.
 * Every state is enumerated (no default) so the compiler flags any
 * newly-added state.
 */
static int ath6kl_sdio_resume(struct ath6kl *ar)
{
	switch (ar->state) {
	case ATH6KL_STATE_OFF:
	case ATH6KL_STATE_CUTPOWER:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath6kl_sdio_config(ar);
		break;

	case ATH6KL_STATE_ON:
		break;

	case ATH6KL_STATE_DEEPSLEEP:
		break;

	case ATH6KL_STATE_WOW:
		break;

	case ATH6KL_STATE_SUSPENDING:
		break;

	case ATH6KL_STATE_RESUMING:
		break;

	case ATH6KL_STATE_RECOVERY:
		break;
	}

	ath6kl_cfg80211_resume(ar);

	return 0;
}
963
964
/* set the window address register (using 4-byte register access). */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
	int status;
	u8 addr_val[4];
	s32 i;

	/*
	 * Write bytes 1..3 of the address value first.  Each byte is
	 * replicated four times and sent as a fixed-address write so
	 * only the targeted byte lane is updated.
	 * NOTE(review): this ordering suggests the LSB write (done
	 * last, below) is what triggers the window operation on the
	 * target — confirm against target documentation.
	 */
	for (i = 1; i <= 3; i++) {
		/* fill the buffer with the i:th byte of 'addr' */
		memset(addr_val, ((u8 *)&addr)[i], 4);

		status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
						     4, HIF_WR_SYNC_BYTE_FIX);
		if (status)
			break;
	}

	if (status) {
		ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	/*
	 * Write the whole 4-byte value as an incrementing byte write;
	 * this writes the LSB (and rewrites the upper bytes).
	 */
	status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
					     4, HIF_WR_SYNC_BYTE_INC);

	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	return 0;
}
1017
/* Read a 32-bit target-space word through the diagnostic window. */
static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
	int status;

	/* set the window address register, which starts the read cycle */
	status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
					address);

	if (status)
		return status;

	/* read the data out of the window data register */
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
				(u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to read from window data addr\n",
			   __func__);
		return status;
	}

	return status;
}
1040
1041static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
1042 __le32 data)
1043{
1044 int status;
1045 u32 val = (__force u32) data;
1046
1047
1048 status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
1049 (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
1050 if (status) {
1051 ath6kl_err("%s: failed to write 0x%x to window data addr\n",
1052 __func__, data);
1053 return status;
1054 }
1055
1056
1057 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
1058 address);
1059}
1060
/*
 * Wait (bounded by BMI_COMMUNICATION_TIMEOUT) until the target grants
 * at least one BMI command credit; reading the decrementing counter
 * register consumes the credit.
 */
static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
	u32 addr;
	unsigned long timeout;
	int ret;

	ar->bmi.cmd_credits = 0;

	/* address of the credit-decrement counter for the BMI endpoint */
	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
		/*
		 * Hit the credit counter with a 4-byte access; per the
		 * error message below this read decrements the counter.
		 */
		ret = ath6kl_sdio_read_write_sync(ar, addr,
					 (u8 *)&ar->bmi.cmd_credits, 4,
					 HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit count register: %d\n",
				   ret);
			return ret;
		}

		/* the counter is only 8 bits wide; ignore the rest */
		ar->bmi.cmd_credits &= 0xFF;
	}

	if (!ar->bmi.cmd_credits) {
		ath6kl_err("bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
1102
/*
 * Poll (bounded by BMI_COMMUNICATION_TIMEOUT) until the RX lookahead
 * valid bit for the BMI endpoint is set, i.e. a response is waiting.
 */
static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
	unsigned long timeout;
	u32 rx_word = 0;
	int ret = 0;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath6kl_sdio_read_write_sync(ar,
					RX_LOOKAHEAD_VALID_ADDRESS,
					(u8 *)&rx_word, sizeof(rx_word),
					HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= (1 << ENDPOINT1);
	}

	if (!rx_word) {
		ath6kl_err("bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}
1131
1132static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
1133{
1134 int ret;
1135 u32 addr;
1136
1137 ret = ath6kl_sdio_bmi_credits(ar);
1138 if (ret)
1139 return ret;
1140
1141 addr = ar->mbox_info.htc_addr;
1142
1143 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
1144 HIF_WR_SYNC_BYTE_INC);
1145 if (ret) {
1146 ath6kl_err("unable to send the bmi data to the device\n");
1147 return ret;
1148 }
1149
1150 return 0;
1151}
1152
/* Read a BMI response from the target's HTC mailbox. */
static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	/*
	 * Before posting the mailbox read, wait until the target flags
	 * (via the RX lookahead valid bit) that response data is
	 * actually available.  This yields a clean driver-level
	 * timeout on a dead target instead of an SDIO-level error.
	 * Only done for reads of at least one full 32-bit word.
	 */
	if (len >= 4) {
		ret = ath6kl_bmi_get_rx_lkahd(ar);
		if (ret)
			return ret;
	}

	addr = ar->mbox_info.htc_addr;
	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
					  HIF_RD_SYNC_BYTE_INC);
	if (ret) {
		ath6kl_err("Unable to read the bmi data from the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}
1221
/*
 * Flush the async write queue, completing every pending request
 * with -ECANCELED after cancelling the work item.
 */
static void ath6kl_sdio_stop(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *req, *tmp_req;
	void *context;

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);

		if (req->scat_req) {
			/* this is a scatter gather request */
			req->scat_req->status = -ECANCELED;
			req->scat_req->complete(ar_sdio->ar->htc_target,
						req->scat_req);
		} else {
			context = req->packet;
			ath6kl_sdio_free_bus_req(ar_sdio, req);
			ath6kl_hif_rw_comp_handler(context, -ECANCELED);
		}
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	/*
	 * NOTE(review): 4 is presumably the number of pre-allocated
	 * scatter requests (MAX_SCATTER_REQUESTS / ATH6KL_SCATTER_REQS)
	 * — confirm and consider naming the constant.
	 */
	WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}
1253
/* HIF callbacks exported to the ath6kl core. */
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
	.suspend = ath6kl_sdio_suspend,
	.resume = ath6kl_sdio_resume,
	.diag_read32 = ath6kl_sdio_diag_read32,
	.diag_write32 = ath6kl_sdio_diag_write32,
	.bmi_read = ath6kl_sdio_bmi_read,
	.bmi_write = ath6kl_sdio_bmi_write,
	.power_on = ath6kl_sdio_power_on,
	.power_off = ath6kl_sdio_power_off,
	.stop = ath6kl_sdio_stop,
};
1274
1275#ifdef CONFIG_PM_SLEEP
1276
1277
1278
1279
1280
/*
 * System PM suspend hook: logging only.  The device-level suspend
 * work is handled via the hif .suspend op — presumably driven by the
 * core/cfg80211 path; confirm.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

	return 0;
}
1287
/* System PM resume hook: logging only (see the suspend hook above's
 * counterpart, the hif .resume op). */
static int ath6kl_sdio_pm_resume(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

	return 0;
}
1294
static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
			 ath6kl_sdio_pm_resume);

/* plugged into the sdio_driver below; NULL when PM sleep is disabled */
#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)

#else

#define ATH6KL_SDIO_PM_OPS NULL

#endif
1305
/*
 * SDIO probe: allocate HIF state, seed the bus request free list,
 * create the ath6kl core, configure the card and initialize the
 * core.  All allocations are unwound on failure.
 */
static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->dma_buffer_mutex);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	init_waitqueue_head(&ar_sdio->irq_wq);

	/* seed the bus request free list from the static pool */
	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_create(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;
	ar->bmi.max_data_size = 256;

	ath6kl_sdio_set_mbox_info(ar);

	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
		goto err_core_alloc;
	}

	ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_core_alloc;
	}

	return ret;

err_core_alloc:
	ath6kl_core_destroy(ar_sdio->ar);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}
1389
/* SDIO remove: stop traffic, flush async work and tear down the core. */
static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_core_cleanup(ar_sdio->ar);
	ath6kl_core_destroy(ar_sdio->ar);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}
1409
/* Supported Atheros AR6003/AR6004 SDIO device IDs. */
static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_00)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_01)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_00)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_01)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_02)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_18)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_19)},
	{},
};
1420
1421MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
1422
/* SDIO driver registration record. */
static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
	.drv.pm = ATH6KL_SDIO_PM_OPS,
};
1430
1431static int __init ath6kl_sdio_init(void)
1432{
1433 int ret;
1434
1435 ret = sdio_register_driver(&ath6kl_sdio_driver);
1436 if (ret)
1437 ath6kl_err("sdio driver registration failed: %d\n", ret);
1438
1439 return ret;
1440}
1441
/* Module exit: unregister the SDIO driver. */
static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}
1446
1447module_init(ath6kl_sdio_init);
1448module_exit(ath6kl_sdio_exit);
1449
1450MODULE_AUTHOR("Atheros Communications, Inc.");
1451MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
1452MODULE_LICENSE("Dual BSD/GPL");
1453
1454MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
1455MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
1456MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
1457MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
1458MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
1459MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
1460MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
1461MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
1462MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
1463MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
1464MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
1465MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
1466MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
1467MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
1468MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
1469MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
1470MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
1471MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
1472MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
1473MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
1474MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
1475MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);
1476