/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"

struct ath6kl_sdio {
        struct sdio_func *func;

        /* protects access to bus_req_freeq */
        spinlock_t lock;

        /* free list */
        struct list_head bus_req_freeq;

        /* available bus requests */
        struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

        struct ath6kl *ar;

        u8 *dma_buffer;

        /* protects access to dma_buffer */
        struct mutex dma_buffer_mutex;

        /* scatter request list head */
        struct list_head scat_req;

        atomic_t irq_handling;
        wait_queue_head_t irq_wq;

        /* protects access to scat_req */
        spinlock_t scat_lock;

        bool scatter_enabled;

        bool is_disabled;
        const struct sdio_device_id *id;
        struct work_struct wr_async_work;
        struct list_head wr_asyncq;

        /* protects access to wr_asyncq */
        spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
        return ar->hif_priv;
}

/*
 * Check if a DMA buffer is word-aligned and DMA-able. Most host
 * controllers assume the buffer is DMA'able and will bug-check
 * otherwise (i.e. buffers on the stack). virt_addr_valid() fails
 * on stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
        return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
        struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

        /* mailbox addresses and sizes advertised to the HTC layer */
        mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
        mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
        mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
        mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
        mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
        mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

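/*
 * Layout of the SD CMD52/CMD53 argument, per the SDIO specification:
 * bit 31 is the R/W flag, bits 30:28 the function number, bit 27 the
 * block/byte mode flag (CMD53) or RAW flag (CMD52), bit 26 the op code
 * (fixed vs. incrementing address), bits 25:9 the register address and
 * bits 8:0 the byte/block count (or data byte for CMD52). The helpers
 * below pack these fields for raw commands issued outside the SDIO core.
 */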
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
                                             u8 mode, u8 opcode, u32 addr,
                                             u16 blksz)
{
        *arg = (((rw & 1) << 31) |
                ((func & 0x7) << 28) |
                ((mode & 1) << 27) |
                ((opcode & 1) << 26) |
                ((addr & 0x1FFFF) << 9) |
                (blksz & 0x1FF));
}

static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
                                             unsigned int address,
                                             unsigned char val)
{
        const u8 func = 0;

        *arg = ((write & 1) << 31) |
               ((func & 0x7) << 28) |
               ((raw & 1) << 27) |
               (1 << 26) |
               ((address & 0x1FFFF) << 9) |
               (1 << 8) |
               (val & 0xFF);
}

static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
                                           unsigned int address,
                                           unsigned char byte)
{
        struct mmc_command io_cmd;

        memset(&io_cmd, 0, sizeof(io_cmd));
        ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
        io_cmd.opcode = SD_IO_RW_DIRECT;
        io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

        return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
                          u8 *buf, u32 len)
{
        int ret = 0;

        sdio_claim_host(func);

        if (request & HIF_WRITE) {
                /* make sure the last byte lands on the mailbox EOM address */
                if (addr >= HIF_MBOX_BASE_ADDR &&
                    addr <= HIF_MBOX_END_ADDR)
                        addr += (HIF_MBOX_WIDTH - len);

                /* same adjustment for the extended mailbox window */
                if (addr == HIF_MBOX0_EXT_BASE_ADDR)
                        addr += HIF_MBOX0_EXT_WIDTH - len;

                if (request & HIF_FIXED_ADDRESS)
                        ret = sdio_writesb(func, addr, buf, len);
                else
                        ret = sdio_memcpy_toio(func, addr, buf, len);
        } else {
                if (request & HIF_FIXED_ADDRESS)
                        ret = sdio_readsb(func, buf, addr, len);
                else
                        ret = sdio_memcpy_fromio(func, buf, addr, len);
        }

        sdio_release_host(func);

        ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
                   request & HIF_WRITE ? "wr" : "rd", addr,
                   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
        ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

        return ret;
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
        struct bus_request *bus_req;

        spin_lock_bh(&ar_sdio->lock);

        if (list_empty(&ar_sdio->bus_req_freeq)) {
                spin_unlock_bh(&ar_sdio->lock);
                return NULL;
        }

        bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
                                   struct bus_request, list);
        list_del(&bus_req->list);

        spin_unlock_bh(&ar_sdio->lock);

        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);

        return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
                                     struct bus_request *bus_req)
{
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);

        spin_lock_bh(&ar_sdio->lock);
        list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
        spin_unlock_bh(&ar_sdio->lock);
}

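/*
 * Scatter-gather support: an HIF scatter request is mapped onto a single
 * CMD53 data transfer whose mmc_data carries an sg list with one entry
 * per scatter item, so the whole request goes out in one bus transaction.
 */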
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
                                        struct mmc_data *data)
{
        struct scatterlist *sg;
        int i;

        data->blksz = HIF_MBOX_BLOCK_SIZE;
        data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

        ath6kl_dbg(ATH6KL_DBG_SCATTER,
                   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot: %d, sg: %d)\n",
                   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
                   data->blksz, data->blocks, scat_req->len,
                   scat_req->scat_entries);

        data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
                                                    MMC_DATA_READ;

        /* fill SG entries */
        sg = scat_req->sgentries;
        sg_init_table(sg, scat_req->scat_entries);

        /* assemble SG list */
        for (i = 0; i < scat_req->scat_entries; i++, sg++) {
                ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr: 0x%p, len: %d\n",
                           i, scat_req->scat_list[i].buf,
                           scat_req->scat_list[i].len);

                sg_set_buf(sg, scat_req->scat_list[i].buf,
                           scat_req->scat_list[i].len);
        }

        /* set scatter-gather table for request */
        data->sg = scat_req->sgentries;
        data->sg_len = scat_req->scat_entries;
}

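/*
 * Issue one scatter request. Requests marked virt_scat fall back to a
 * linearized bounce buffer and a plain synchronous transfer; otherwise a
 * raw CMD53 is built and handed to the MMC layer with the sg list set up
 * above.
 */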
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
                               struct bus_request *req)
{
        struct mmc_request mmc_req;
        struct mmc_command cmd;
        struct mmc_data data;
        struct hif_scatter_req *scat_req;
        u8 opcode, rw;
        int status, len;

        scat_req = req->scat_req;

        if (scat_req->virt_scat) {
                len = scat_req->len;
                if (scat_req->req & HIF_BLOCK_BASIS)
                        len = round_down(len, HIF_MBOX_BLOCK_SIZE);

                status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
                                        scat_req->addr, scat_req->virt_dma_buf,
                                        len);
                goto scat_complete;
        }

        memset(&mmc_req, 0, sizeof(struct mmc_request));
        memset(&cmd, 0, sizeof(struct mmc_command));
        memset(&data, 0, sizeof(struct mmc_data));

        ath6kl_sdio_setup_scat_data(scat_req, &data);

        opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
                  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

        rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

        /* Fixup the address so that the last byte will fall on MBOX EOM */
        if (scat_req->req & HIF_WRITE) {
                if (scat_req->addr == HIF_MBOX_BASE_ADDR)
                        scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
                else
                        /* Uses extended address range */
                        scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
        }

        /* set command argument */
        ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
                                  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
                                  data.blocks);

        cmd.opcode = SD_IO_RW_EXTENDED;
        cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

        mmc_req.cmd = &cmd;
        mmc_req.data = &data;

        sdio_claim_host(ar_sdio->func);

        mmc_set_data_timeout(&data, ar_sdio->func->card);

        /* synchronous call to process request */
        mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

        sdio_release_host(ar_sdio->func);

        status = cmd.error ? cmd.error : data.error;

scat_complete:
        scat_req->status = status;

        if (scat_req->status)
                ath6kl_err("Scatter write request failed: %d\n",
                           scat_req->status);

        if (scat_req->req & HIF_ASYNCHRONOUS)
                scat_req->complete(ar_sdio->ar->htc_target, scat_req);

        return status;
}

static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
                                           int n_scat_entry, int n_scat_req,
                                           bool virt_scat)
{
        struct hif_scatter_req *s_req;
        struct bus_request *bus_req;
        int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
        u8 *virt_buf;

        scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
        scat_req_sz = sizeof(*s_req) + scat_list_sz;

        if (!virt_scat)
                sg_sz = sizeof(struct scatterlist) * n_scat_entry;
        else
                buf_sz = 2 * L1_CACHE_BYTES +
                         ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

        for (i = 0; i < n_scat_req; i++) {
                /* allocate the scatter request */
                s_req = kzalloc(scat_req_sz, GFP_KERNEL);
                if (!s_req)
                        return -ENOMEM;

                if (virt_scat) {
                        virt_buf = kzalloc(buf_sz, GFP_KERNEL);
                        if (!virt_buf) {
                                kfree(s_req);
                                return -ENOMEM;
                        }

                        s_req->virt_dma_buf =
                                (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
                } else {
                        /* allocate sglist */
                        s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);

                        if (!s_req->sgentries) {
                                kfree(s_req);
                                return -ENOMEM;
                        }
                }

                /* allocate a bus request for this scatter request */
                bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
                if (!bus_req) {
                        kfree(s_req->sgentries);
                        kfree(s_req->virt_dma_buf);
                        kfree(s_req);
                        return -ENOMEM;
                }

                /* assign the scatter request to this bus request */
                bus_req->scat_req = s_req;
                s_req->busrequest = bus_req;

                s_req->virt_scat = virt_scat;

                /* add it to the scatter pool */
                hif_scatter_req_add(ar_sdio->ar, s_req);
        }

        return 0;
}

static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
                                       u32 len, u32 request)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u8 *tbuf = NULL;
        int ret;
        bool bounced = false;

        if (request & HIF_BLOCK_BASIS)
                len = round_down(len, HIF_MBOX_BLOCK_SIZE);

        if (buf_needs_bounce(buf)) {
                if (!ar_sdio->dma_buffer)
                        return -ENOMEM;
                mutex_lock(&ar_sdio->dma_buffer_mutex);
                tbuf = ar_sdio->dma_buffer;

                if (request & HIF_WRITE)
                        memcpy(tbuf, buf, len);

                bounced = true;
        } else {
                tbuf = buf;
        }

        ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
        if ((request & HIF_READ) && bounced)
                memcpy(buf, tbuf, len);

        if (bounced)
                mutex_unlock(&ar_sdio->dma_buffer_mutex);

        return ret;
}

static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
                                      struct bus_request *req)
{
        if (req->scat_req) {
                ath6kl_sdio_scat_rw(ar_sdio, req);
        } else {
                void *context;
                int status;

                status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
                                                     req->buffer, req->length,
                                                     req->request);
                context = req->packet;
                ath6kl_sdio_free_bus_req(ar_sdio, req);
                ath6kl_hif_rw_comp_handler(context, status);
        }
}

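/*
 * Worker that drains wr_asyncq: each queued bus request is completed
 * synchronously here, outside the caller's (possibly atomic) context.
 * The lock is dropped around the actual transfer since it can sleep.
 */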
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
        struct ath6kl_sdio *ar_sdio;
        struct bus_request *req, *tmp_req;

        ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                __ath6kl_sdio_write_async(ar_sdio, req);
                spin_lock_bh(&ar_sdio->wr_async_lock);
        }
        spin_unlock_bh(&ar_sdio->wr_async_lock);
}

static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
        int status;
        struct ath6kl_sdio *ar_sdio;

        ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

        ar_sdio = sdio_get_drvdata(func);
        atomic_set(&ar_sdio->irq_handling, 1);
        /*
         * Release the host during interrupts so we can pick it back up
         * when we process commands.
         */
        sdio_release_host(ar_sdio->func);

        status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
        sdio_claim_host(ar_sdio->func);

        atomic_set(&ar_sdio->irq_handling, 0);
        wake_up(&ar_sdio->irq_wq);

        WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret = 0;

        if (!ar_sdio->is_disabled)
                return 0;

        ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

        sdio_claim_host(func);

        ret = sdio_enable_func(func);
        if (ret) {
                ath6kl_err("Unable to enable sdio func: %d\n", ret);
                sdio_release_host(func);
                return ret;
        }

        sdio_release_host(func);

        /*
         * Wait for hardware to initialise. It should take a while to
         * power up firmware.
         */
        msleep(10);

        ar_sdio->is_disabled = false;

        return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        if (ar_sdio->is_disabled)
                return 0;

        ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

        /* Disable the card */
        sdio_claim_host(ar_sdio->func);
        ret = sdio_disable_func(ar_sdio->func);
        sdio_release_host(ar_sdio->func);

        if (ret)
                return ret;

        ar_sdio->is_disabled = true;

        return ret;
}

static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
                                   u32 length, u32 request,
                                   struct htc_packet *packet)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *bus_req;

        bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
        if (!bus_req)
                return -ENOMEM;

        bus_req->address = address;
        bus_req->buffer = buffer;
        bus_req->length = length;
        bus_req->request = request;
        bus_req->packet = packet;

        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
        spin_unlock_bh(&ar_sdio->wr_async_lock);
        queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

        return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        sdio_claim_host(ar_sdio->func);

        /* Register the isr */
        ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
        if (ret)
                ath6kl_err("Failed to claim sdio irq: %d\n", ret);

        sdio_release_host(ar_sdio->func);
}

static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

        return !atomic_read(&ar_sdio->irq_handling);
}

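/*
 * Releasing the IRQ must not race with a handler that is still running:
 * the host is released while waiting so that ath6kl_sdio_irq_handler()
 * (which runs with the host claimed) can finish and wake irq_wq.
 */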
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        sdio_claim_host(ar_sdio->func);

        if (atomic_read(&ar_sdio->irq_handling)) {
                sdio_release_host(ar_sdio->func);

                ret = wait_event_interruptible(ar_sdio->irq_wq,
                                               ath6kl_sdio_is_on_irq(ar));
                if (ret)
                        return;

                sdio_claim_host(ar_sdio->func);
        }

        ret = sdio_release_irq(ar_sdio->func);
        if (ret)
                ath6kl_err("Failed to release sdio irq: %d\n", ret);

        sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *node = NULL;

        spin_lock_bh(&ar_sdio->scat_lock);

        if (!list_empty(&ar_sdio->scat_req)) {
                node = list_first_entry(&ar_sdio->scat_req,
                                        struct hif_scatter_req, list);
                list_del(&node->list);

                node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
        }

        spin_unlock_bh(&ar_sdio->scat_lock);

        return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
                                        struct hif_scatter_req *s_req)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

        spin_lock_bh(&ar_sdio->scat_lock);

        list_add_tail(&s_req->list, &ar_sdio->scat_req);

        spin_unlock_bh(&ar_sdio->scat_lock);
}

/* scatter gather read write requests */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
                                        struct hif_scatter_req *scat_req)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u32 request = scat_req->req;
        int status = 0;

        if (!scat_req->len)
                return -EINVAL;

        ath6kl_dbg(ATH6KL_DBG_SCATTER,
                   "hif-scatter: total len: %d scatter entries: %d\n",
                   scat_req->len, scat_req->scat_entries);

        if (request & HIF_SYNCHRONOUS) {
                status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
        } else {
                spin_lock_bh(&ar_sdio->wr_async_lock);
                list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
        }

        return status;
}

static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *s_req, *tmp_req;

        /* empty the free list */
        spin_lock_bh(&ar_sdio->scat_lock);
        list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
                list_del(&s_req->list);
                spin_unlock_bh(&ar_sdio->scat_lock);

                /*
                 * FIXME: should we also call completion handler with
                 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
                 * that the packet is properly freed?
                 */
                if (s_req->busrequest)
                        ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
                kfree(s_req->virt_dma_buf);
                kfree(s_req->sgentries);
                kfree(s_req);

                spin_lock_bh(&ar_sdio->scat_lock);
        }
        spin_unlock_bh(&ar_sdio->scat_lock);
}

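/*
 * Scatter setup policy: prefer true host-controller scatter-gather when
 * the host supports enough segments; otherwise fall back to "virtual"
 * scatter, which linearizes the entries through a bounce buffer.
 */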
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct htc_target *target = ar->htc_target;
        int ret = 0;
        bool virt_scat = false;

        if (ar_sdio->scatter_enabled)
                return 0;

        ar_sdio->scatter_enabled = true;

        /* check if host supports scatter and it meets our requirements */
        if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
                ath6kl_err("host only supports scatter of %d entries, need %d\n",
                           ar_sdio->func->card->host->max_segs,
                           MAX_SCATTER_ENTRIES_PER_REQ);
                virt_scat = true;
        }

        if (!virt_scat) {
                ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
                                MAX_SCATTER_ENTRIES_PER_REQ,
                                MAX_SCATTER_REQUESTS, virt_scat);

                if (!ret) {
                        ath6kl_dbg(ATH6KL_DBG_BOOT,
                                   "hif-scatter enabled requests %d entries %d\n",
                                   MAX_SCATTER_REQUESTS,
                                   MAX_SCATTER_ENTRIES_PER_REQ);

                        target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
                        target->max_xfer_szper_scatreq =
                                MAX_SCATTER_REQ_TRANSFER_SIZE;
                } else {
                        ath6kl_sdio_cleanup_scatter(ar);
                        ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
                }
        }

        if (virt_scat || ret) {
                ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
                                ATH6KL_SCATTER_ENTRIES_PER_REQ,
                                ATH6KL_SCATTER_REQS, virt_scat);

                if (ret) {
                        ath6kl_err("failed to alloc virtual scatter resources!\n");
                        ath6kl_sdio_cleanup_scatter(ar);
                        return ret;
                }

                ath6kl_dbg(ATH6KL_DBG_BOOT,
                           "virtual scatter enabled requests %d entries %d\n",
                           ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

                target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
                target->max_xfer_szper_scatreq =
                        ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
        }

        return 0;
}

static int ath6kl_sdio_config(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);

        if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
            MANUFACTURER_ID_AR6003_BASE) {
                /* enable 4-bit ASYNC interrupt on AR6003 or later */
                ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
                                CCCR_SDIO_IRQ_MODE_REG,
                                SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
                if (ret) {
                        ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
                                   ret);
                        goto out;
                }

                ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
        }

        /* give us some time to enable, in ms */
        func->enable_timeout = 100;

        ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
        if (ret) {
                ath6kl_err("Set sdio block size %d failed: %d\n",
                           HIF_MBOX_BLOCK_SIZE, ret);
                goto out;
        }

out:
        sdio_release_host(func);

        return ret;
}

static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        mmc_pm_flag_t flags;
        int ret;

        flags = sdio_get_host_pm_caps(func);

        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

        if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
            !(flags & MMC_PM_KEEP_POWER))
                return -EINVAL;

        ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
        if (ret) {
                ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
                return ret;
        }

        /* sdio irq wakes up host */
        ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
        if (ret)
                ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

        return ret;
}

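/*
 * Suspend policy: try WoW if configured (requires keep-power and SDIO
 * IRQ wakeup support from the host), fall back to deep sleep if the
 * host can at least keep power, and cut power as the last resort.
 */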
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        mmc_pm_flag_t flags;
        bool try_deepsleep = false;
        int ret;

        if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
                ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");

                ret = ath6kl_set_sdio_pm_caps(ar);
                if (ret)
                        goto cut_pwr;

                ret = ath6kl_cfg80211_suspend(ar,
                                              ATH6KL_CFG_SUSPEND_SCHED_SCAN,
                                              NULL);
                if (ret)
                        goto cut_pwr;

                return 0;
        }

        if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
            (!ar->suspend_mode && wow)) {
                ret = ath6kl_set_sdio_pm_caps(ar);
                if (ret)
                        goto cut_pwr;

                ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
                if (ret && ret != -ENOTCONN)
                        ath6kl_err("wow suspend failed: %d\n", ret);

                if (ret &&
                    (!ar->wow_suspend_mode ||
                     ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
                        try_deepsleep = true;
                else if (ret &&
                         ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
                        goto cut_pwr;
                if (!ret)
                        return 0;
        }

        if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
            !ar->suspend_mode || try_deepsleep) {
                flags = sdio_get_host_pm_caps(func);
                if (!(flags & MMC_PM_KEEP_POWER))
                        goto cut_pwr;

                ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
                if (ret)
                        goto cut_pwr;

                /*
                 * Workaround to support Deep Sleep with MSM: set the host
                 * pm flag MMC_PM_WAKE_SDIO_IRQ so the SDCC driver can
                 * disable sdc2_clock and let the MSM enter TCXO shutdown
                 * properly.
                 */
                if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
                        ret = sdio_set_host_pm_flags(func,
                                                     MMC_PM_WAKE_SDIO_IRQ);
                        if (ret)
                                goto cut_pwr;
                }

                ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
                                              NULL);
                if (ret)
                        goto cut_pwr;

                return 0;
        }

cut_pwr:
        return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}

static int ath6kl_sdio_resume(struct ath6kl *ar)
{
        switch (ar->state) {
        case ATH6KL_STATE_OFF:
        case ATH6KL_STATE_CUTPOWER:
                ath6kl_dbg(ATH6KL_DBG_SUSPEND,
                           "sdio resume configuring sdio\n");

                /* need to set sdio settings after power is cut from sdio */
                ath6kl_sdio_config(ar);
                break;

        case ATH6KL_STATE_ON:
                break;

        case ATH6KL_STATE_DEEPSLEEP:
                break;

        case ATH6KL_STATE_WOW:
                break;

        case ATH6KL_STATE_SCHED_SCAN:
                break;

        case ATH6KL_STATE_SUSPENDING:
                break;

        case ATH6KL_STATE_RESUMING:
                break;
        }

        ath6kl_cfg80211_resume(ar);

        return 0;
}

/* set the window address register (using 4-byte register access) */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
        int status;
        u8 addr_val[4];
        s32 i;

        /*
         * Write bytes 1,2,3 of the register to set the upper address bytes,
         * the LSB is written last to initiate the access cycle.
         */
        for (i = 1; i <= 3; i++) {
                /*
                 * Fill the buffer with the address byte value we want to
                 * hit 4 times.
                 */
                memset(addr_val, ((u8 *)&addr)[i], 4);

                /*
                 * Hit each byte of the register address with a 4-byte
                 * write operation to the same address, this is a harmless
                 * operation.
                 */
                status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
                                                     4, HIF_WR_SYNC_BYTE_FIX);
                if (status)
                        break;
        }

        if (status) {
                ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
                           __func__, addr, reg_addr);
                return status;
        }

        /*
         * Write the address register again, this time write the whole
         * 4-byte value. The effect here is that the LSB write causes the
         * cycle to start, and the extra 3-byte write to bytes 1,2,3 has
         * no effect since we are writing the same values again.
         */
        status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
                                             4, HIF_WR_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
                           __func__, addr, reg_addr);
                return status;
        }

        return 0;
}

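/*
 * Diagnostic window access: target memory is read/written indirectly by
 * programming a window address register and then transferring 32 bits
 * through WINDOW_DATA_ADDRESS. For writes the data is staged first,
 * since setting the address register is what triggers the access cycle.
 */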
static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
        int status;

        /* set window register to start read cycle */
        status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
                                        address);
        if (status)
                return status;

        /* read the data */
        status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
                                             (u8 *)data, sizeof(u32),
                                             HIF_RD_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to read from window data addr\n",
                           __func__);
                return status;
        }

        return status;
}

static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
                                    __le32 data)
{
        int status;
        u32 val = (__force u32) data;

        /* set write data */
        status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
                                             (u8 *) &val, sizeof(u32),
                                             HIF_WR_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to write 0x%x to window data addr\n",
                           __func__, data);
                return status;
        }

        /* set window register, which starts the write cycle */
        return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
                                      address);
}

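/*
 * BMI flow control: the target exports a credit counter; reading it
 * decrements the count, and a command may only be sent once a credit
 * has been obtained, so we poll until one shows up or we time out.
 */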
static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
        u32 addr;
        unsigned long timeout;
        int ret;

        ar->bmi.cmd_credits = 0;

        /* Read the counter register to get the command credits */
        addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
                /*
                 * Hit the credit counter with a 4-byte access, the first
                 * byte read will hit the counter and cause a decrement,
                 * while the remaining 3 bytes has no effect. The rationale
                 * behind this is to make all HIF accesses 4-byte aligned.
                 */
                ret = ath6kl_sdio_read_write_sync(ar, addr,
                                                  (u8 *)&ar->bmi.cmd_credits, 4,
                                                  HIF_RD_SYNC_BYTE_INC);
                if (ret) {
                        ath6kl_err("Unable to decrement the command credit count register: %d\n",
                                   ret);
                        return ret;
                }

                /*
                 * The counter is only 8 bits.
                 * Ignore anything in the upper 3 bytes.
                 */
                ar->bmi.cmd_credits &= 0xFF;
        }

        if (!ar->bmi.cmd_credits) {
                ath6kl_err("bmi communication timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
        unsigned long timeout;
        u32 rx_word = 0;
        int ret = 0;

        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while ((time_before(jiffies, timeout)) && !rx_word) {
                ret = ath6kl_sdio_read_write_sync(ar,
                                                  RX_LOOKAHEAD_VALID_ADDRESS,
                                                  (u8 *)&rx_word,
                                                  sizeof(rx_word),
                                                  HIF_RD_SYNC_BYTE_INC);
                if (ret) {
                        ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
                        return ret;
                }

                /* all we really want is one bit */
                rx_word &= (1 << ENDPOINT1);
        }

        if (!rx_word) {
                ath6kl_err("bmi_recv_buf FIFO empty\n");
                return -EINVAL;
        }

        return ret;
}

static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
        int ret;
        u32 addr;

        ret = ath6kl_sdio_bmi_credits(ar);
        if (ret)
                return ret;

        addr = ar->mbox_info.htc_addr;

        ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
                                          HIF_WR_SYNC_BYTE_INC);
        if (ret)
                ath6kl_err("unable to send the bmi data to the device\n");

        return ret;
}

static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
        int ret;
        u32 addr;

        /*
         * During normal bootup, small reads may be required.
         * Rather than issue an HIF Read and then wait as the Target
         * adds successive bytes to the FIFO, we wait here until
         * we know that response data is available.
         *
         * This allows us to cleanly timeout on an unexpected
         * Target failure rather than risk problems at the HIF level.
         * In particular, this avoids SDIO timeouts and possibly garbage
         * data on some host controllers. And on an interconnect
         * such as Compact Flash (as well as some SDIO masters) which
         * does not provide any indication on data timeout, it avoids
         * a potential hang or garbage response.
         *
         * Synchronization is more difficult for reads larger than the
         * size of the MBOX FIFO (128B), because the Target is unable
         * to push the 129th byte of data until AFTER the Host posts an
         * HIF Read and removes some FIFO data. So for large reads the
         * Host proceeds to post an HIF Read BEFORE all the data is
         * actually available to read. Fortunately, large BMI reads do
         * not occur in practice -- they're supported for debug/development.
         *
         * So Host/Target BMI synchronization is divided into these cases:
         *
         *  CASE 1: length < 4
         *        Should not happen
         *
         *  CASE 2: 4 <= length <= 128
         *        Wait for first 4 bytes to be in FIFO.
         *
         *  CASE 3: length > 128
         *        Wait for the first 4 bytes to be in FIFO.
         *
         * For most uses, a small timeout should be sufficient.
         */
        if (len >= 4) {
                ret = ath6kl_bmi_get_rx_lkahd(ar);
                if (ret)
                        return ret;
        }

        addr = ar->mbox_info.htc_addr;
        ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
                                          HIF_RD_SYNC_BYTE_INC);
        if (ret) {
                ath6kl_err("Unable to read the bmi data from the device: %d\n",
                           ret);
                return ret;
        }

        return 0;
}

static void ath6kl_sdio_stop(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *req, *tmp_req;
        void *context;

        /* stop the async write worker before failing the queued requests */
        cancel_work_sync(&ar_sdio->wr_async_work);

        spin_lock_bh(&ar_sdio->wr_async_lock);

        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);

                if (req->scat_req) {
                        /* this is a scatter request, notify HTC of the
                         * cancellation */
                        req->scat_req->status = -ECANCELED;
                        req->scat_req->complete(ar_sdio->ar->htc_target,
                                                req->scat_req);
                } else {
                        context = req->packet;
                        ath6kl_sdio_free_bus_req(ar_sdio, req);
                        ath6kl_hif_rw_comp_handler(context, -ECANCELED);
                }
        }

        spin_unlock_bh(&ar_sdio->wr_async_lock);

        /* all scatter requests should be back on the free queue by now */
        WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
        .read_write_sync = ath6kl_sdio_read_write_sync,
        .write_async = ath6kl_sdio_write_async,
        .irq_enable = ath6kl_sdio_irq_enable,
        .irq_disable = ath6kl_sdio_irq_disable,
        .scatter_req_get = ath6kl_sdio_scatter_req_get,
        .scatter_req_add = ath6kl_sdio_scatter_req_add,
        .enable_scatter = ath6kl_sdio_enable_scatter,
        .scat_req_rw = ath6kl_sdio_async_rw_scatter,
        .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
        .suspend = ath6kl_sdio_suspend,
        .resume = ath6kl_sdio_resume,
        .diag_read32 = ath6kl_sdio_diag_read32,
        .diag_write32 = ath6kl_sdio_diag_write32,
        .bmi_read = ath6kl_sdio_bmi_read,
        .bmi_write = ath6kl_sdio_bmi_write,
        .power_on = ath6kl_sdio_power_on,
        .power_off = ath6kl_sdio_power_off,
        .stop = ath6kl_sdio_stop,
};

#ifdef CONFIG_PM_SLEEP

/*
 * Empty handlers: the real suspend/resume logic runs through the HIF ops
 * (ath6kl_sdio_suspend/resume) via cfg80211. Providing dev_pm_ops is
 * still needed so that the SDIO core treats the driver as suspend-capable
 * instead of removing the card across system sleep.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

        return 0;
}

static int ath6kl_sdio_pm_resume(struct device *device)
{
        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

        return 0;
}

static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
                         ath6kl_sdio_pm_resume);

#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)

#else

#define ATH6KL_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

static int ath6kl_sdio_probe(struct sdio_func *func,
                             const struct sdio_device_id *id)
{
        int ret;
        struct ath6kl_sdio *ar_sdio;
        struct ath6kl *ar;
        int count;

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
                   func->num, func->vendor, func->device,
                   func->max_blksize, func->cur_blksize);

        ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
        if (!ar_sdio)
                return -ENOMEM;

        ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
        if (!ar_sdio->dma_buffer) {
                ret = -ENOMEM;
                goto err_hif;
        }

        ar_sdio->func = func;
        sdio_set_drvdata(func, ar_sdio);

        ar_sdio->id = id;
        ar_sdio->is_disabled = true;

        spin_lock_init(&ar_sdio->lock);
        spin_lock_init(&ar_sdio->scat_lock);
        spin_lock_init(&ar_sdio->wr_async_lock);
        mutex_init(&ar_sdio->dma_buffer_mutex);

        INIT_LIST_HEAD(&ar_sdio->scat_req);
        INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
        INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

        INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

        init_waitqueue_head(&ar_sdio->irq_wq);

        for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
                ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

        ar = ath6kl_core_create(&ar_sdio->func->dev);
        if (!ar) {
                ath6kl_err("Failed to alloc ath6kl core\n");
                ret = -ENOMEM;
                goto err_dma;
        }

        ar_sdio->ar = ar;
        ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
        ar->hif_priv = ar_sdio;
        ar->hif_ops = &ath6kl_sdio_ops;
        ar->bmi.max_data_size = 256;

        ath6kl_sdio_set_mbox_info(ar);

        ret = ath6kl_sdio_config(ar);
        if (ret) {
                ath6kl_err("Failed to config sdio: %d\n", ret);
                goto err_core_alloc;
        }

        ret = ath6kl_core_init(ar);
        if (ret) {
                ath6kl_err("Failed to init ath6kl core\n");
                goto err_core_alloc;
        }

        return ret;

err_core_alloc:
        ath6kl_core_destroy(ar_sdio->ar);
err_dma:
        kfree(ar_sdio->dma_buffer);
err_hif:
        kfree(ar_sdio);

        return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
        struct ath6kl_sdio *ar_sdio;

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "sdio removed func %d vendor 0x%x device 0x%x\n",
                   func->num, func->vendor, func->device);

        ar_sdio = sdio_get_drvdata(func);

        ath6kl_stop_txrx(ar_sdio->ar);
        cancel_work_sync(&ar_sdio->wr_async_work);

        ath6kl_core_cleanup(ar_sdio->ar);
        ath6kl_core_destroy(ar_sdio->ar);

        kfree(ar_sdio->dma_buffer);
        kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
        {},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
        .name = "ath6kl_sdio",
        .id_table = ath6kl_sdio_devices,
        .probe = ath6kl_sdio_probe,
        .remove = ath6kl_sdio_remove,
        .drv.pm = ATH6KL_SDIO_PM_OPS,
};

static int __init ath6kl_sdio_init(void)
{
        int ret;

        ret = sdio_register_driver(&ath6kl_sdio_driver);
        if (ret)
                ath6kl_err("sdio driver registration failed: %d\n", ret);

        return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
        sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);