linux/drivers/net/wireless/ath/ath6kl/sdio.c
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"
#include "trace.h"

struct ath6kl_sdio {
        struct sdio_func *func;

        /* protects access to bus_req_freeq */
        spinlock_t lock;

        /* free list */
        struct list_head bus_req_freeq;

        /* available bus requests */
        struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

        struct ath6kl *ar;

        u8 *dma_buffer;

        /* protects access to dma_buffer */
        struct mutex dma_buffer_mutex;

        /* scatter request list head */
        struct list_head scat_req;

        atomic_t irq_handling;
        wait_queue_head_t irq_wq;

        /* protects access to scat_req */
        spinlock_t scat_lock;

        bool scatter_enabled;

        bool is_disabled;
        const struct sdio_device_id *id;
        struct work_struct wr_async_work;
        struct list_head wr_asyncq;

        /* protects access to wr_asyncq */
        spinlock_t wr_async_lock;
};

#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1

static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
        return ar->hif_priv;
}
/*
 * Check whether a DMA buffer is word-aligned and DMA-able.
 * Most host controllers assume the buffer is DMA'able and will
 * bug-check otherwise (e.g. buffers on the stack); the
 * virt_addr_valid() check fails on stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
        return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}

static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
        struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

        /* EP1 has an extended range */
        mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
        mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
        mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
        mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
        mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
        mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}

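/*
 * CMD53 (SD_IO_RW_EXTENDED) argument layout, as packed below:
 *
 *      bit  31         R/W flag (0 = read, 1 = write)
 *      bits 30:28      function number
 *      bit  27         mode (0 = byte basis, 1 = block basis)
 *      bit  26         op code (0 = fixed address, 1 = incrementing address)
 *      bits 25:9       register address (17 bits)
 *      bits 8:0        byte/block count
 */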
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
                                             u8 mode, u8 opcode, u32 addr,
                                             u16 blksz)
{
        *arg = (((rw & 1) << 31) |
                ((func & 0x7) << 28) |
                ((mode & 1) << 27) |
                ((opcode & 1) << 26) |
                ((addr & 0x1FFFF) << 9) |
                (blksz & 0x1FF));
}

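/*
 * CMD52 (SD_IO_RW_DIRECT) argument layout, as packed below; this helper
 * always targets function 0, and sets bits 26 and 8 (stuff bits in the
 * SDIO spec) to 1:
 *
 *      bit  31         R/W flag (0 = read, 1 = write)
 *      bits 30:28      function number (always 0 here)
 *      bit  27         RAW (read-after-write) flag
 *      bits 25:9       register address (17 bits)
 *      bits 7:0        write data
 */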
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
                                             unsigned int address,
                                             unsigned char val)
{
        const u8 func = 0;

        *arg = ((write & 1) << 31) |
               ((func & 0x7) << 28) |
               ((raw & 1) << 27) |
               (1 << 26) |
               ((address & 0x1FFFF) << 9) |
               (1 << 8) |
               (val & 0xFF);
}

static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
                                           unsigned int address,
                                           unsigned char byte)
{
        struct mmc_command io_cmd;

        memset(&io_cmd, 0, sizeof(io_cmd));
        ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
        io_cmd.opcode = SD_IO_RW_DIRECT;
        io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

        return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
                          u8 *buf, u32 len)
{
        int ret = 0;

        sdio_claim_host(func);

        if (request & HIF_WRITE) {
                /*
                 * FIXME: adjust the address so that the last byte of
                 * the transfer falls on the mailbox EOM (the same fixup
                 * is done in ath6kl_sdio_scat_rw())
                 */
                if (addr >= HIF_MBOX_BASE_ADDR &&
                    addr <= HIF_MBOX_END_ADDR)
                        addr += (HIF_MBOX_WIDTH - len);

                /* the same EOM fixup for the extended mailbox range */
                if (addr == HIF_MBOX0_EXT_BASE_ADDR)
                        addr += HIF_MBOX0_EXT_WIDTH - len;

                if (request & HIF_FIXED_ADDRESS)
                        ret = sdio_writesb(func, addr, buf, len);
                else
                        ret = sdio_memcpy_toio(func, addr, buf, len);
        } else {
                if (request & HIF_FIXED_ADDRESS)
                        ret = sdio_readsb(func, buf, addr, len);
                else
                        ret = sdio_memcpy_fromio(func, buf, addr, len);
        }

        sdio_release_host(func);

        ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
                   request & HIF_WRITE ? "wr" : "rd", addr,
                   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
        ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

        trace_ath6kl_sdio(addr, request, buf, len);

        return ret;
}

static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
        struct bus_request *bus_req;

        spin_lock_bh(&ar_sdio->lock);

        if (list_empty(&ar_sdio->bus_req_freeq)) {
                spin_unlock_bh(&ar_sdio->lock);
                return NULL;
        }

        bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
                                   struct bus_request, list);
        list_del(&bus_req->list);

        spin_unlock_bh(&ar_sdio->lock);
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);

        return bus_req;
}

static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
                                     struct bus_request *bus_req)
{
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);

        spin_lock_bh(&ar_sdio->lock);
        list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
        spin_unlock_bh(&ar_sdio->lock);
}

static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
                                        struct mmc_data *data)
{
        struct scatterlist *sg;
        int i;

        data->blksz = HIF_MBOX_BLOCK_SIZE;
        data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

        ath6kl_dbg(ATH6KL_DBG_SCATTER,
                   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
                   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
                   data->blksz, data->blocks, scat_req->len,
                   scat_req->scat_entries);

        data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
                                                    MMC_DATA_READ;

        /* fill SG entries */
        sg = scat_req->sgentries;
        sg_init_table(sg, scat_req->scat_entries);

        /* assemble SG list */
        for (i = 0; i < scat_req->scat_entries; i++, sg++) {
                ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
                           i, scat_req->scat_list[i].buf,
                           scat_req->scat_list[i].len);

                sg_set_buf(sg, scat_req->scat_list[i].buf,
                           scat_req->scat_list[i].len);
        }

        /* set scatter-gather table for request */
        data->sg = scat_req->sgentries;
        data->sg_len = scat_req->scat_entries;
}

static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
                               struct bus_request *req)
{
        struct mmc_request mmc_req;
        struct mmc_command cmd;
        struct mmc_data data;
        struct hif_scatter_req *scat_req;
        u8 opcode, rw;
        int status, len;

        scat_req = req->scat_req;

        if (scat_req->virt_scat) {
                len = scat_req->len;
                if (scat_req->req & HIF_BLOCK_BASIS)
                        len = round_down(len, HIF_MBOX_BLOCK_SIZE);

                status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
                                        scat_req->addr, scat_req->virt_dma_buf,
                                        len);
                goto scat_complete;
        }

        memset(&mmc_req, 0, sizeof(struct mmc_request));
        memset(&cmd, 0, sizeof(struct mmc_command));
        memset(&data, 0, sizeof(struct mmc_data));

        ath6kl_sdio_setup_scat_data(scat_req, &data);

        opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
                  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

        rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

        /* Fixup the address so that the last byte will fall on MBOX EOM */
        if (scat_req->req & HIF_WRITE) {
                if (scat_req->addr == HIF_MBOX_BASE_ADDR)
                        scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
                else
                        /* Uses extended address range */
                        scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
        }

        /* set command argument */
        ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
                                  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
                                  data.blocks);

        cmd.opcode = SD_IO_RW_EXTENDED;
        cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

        mmc_req.cmd = &cmd;
        mmc_req.data = &data;

        sdio_claim_host(ar_sdio->func);

        mmc_set_data_timeout(&data, ar_sdio->func->card);

        trace_ath6kl_sdio_scat(scat_req->addr,
                               scat_req->req,
                               scat_req->len,
                               scat_req->scat_entries,
                               scat_req->scat_list);

        /* synchronous call to process request */
        mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

        sdio_release_host(ar_sdio->func);

        status = cmd.error ? cmd.error : data.error;

scat_complete:
        scat_req->status = status;

        if (scat_req->status)
                ath6kl_err("Scatter request failed: %d\n",
                           scat_req->status);

        if (scat_req->req & HIF_ASYNCHRONOUS)
                scat_req->complete(ar_sdio->ar->htc_target, scat_req);

        return status;
}

static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
                                           int n_scat_entry, int n_scat_req,
                                           bool virt_scat)
{
        struct hif_scatter_req *s_req;
        struct bus_request *bus_req;
        int i, scat_req_sz, scat_list_sz, size;
        u8 *virt_buf;

        scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
        scat_req_sz = sizeof(*s_req) + scat_list_sz;

        if (!virt_scat)
                size = sizeof(struct scatterlist) * n_scat_entry;
        else
                size =  2 * L1_CACHE_BYTES +
                        ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

        for (i = 0; i < n_scat_req; i++) {
                /* allocate the scatter request */
                s_req = kzalloc(scat_req_sz, GFP_KERNEL);
                if (!s_req)
                        return -ENOMEM;

                if (virt_scat) {
                        virt_buf = kzalloc(size, GFP_KERNEL);
                        if (!virt_buf) {
                                kfree(s_req);
                                return -ENOMEM;
                        }

                        s_req->virt_dma_buf =
                                (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
                } else {
                        /* allocate sglist */
                        s_req->sgentries = kzalloc(size, GFP_KERNEL);

                        if (!s_req->sgentries) {
                                kfree(s_req);
                                return -ENOMEM;
                        }
                }

                /* allocate a bus request for this scatter request */
                bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
                if (!bus_req) {
                        kfree(s_req->sgentries);
                        kfree(s_req->virt_dma_buf);
                        kfree(s_req);
                        return -ENOMEM;
                }

                /* assign the scatter request to this bus request */
                bus_req->scat_req = s_req;
                s_req->busrequest = bus_req;

                s_req->virt_scat = virt_scat;

                /* add it to the scatter pool */
                hif_scatter_req_add(ar_sdio->ar, s_req);
        }

        return 0;
}

static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
                                       u32 len, u32 request)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u8  *tbuf = NULL;
        int ret;
        bool bounced = false;

        if (request & HIF_BLOCK_BASIS)
                len = round_down(len, HIF_MBOX_BLOCK_SIZE);

        if (buf_needs_bounce(buf)) {
                if (!ar_sdio->dma_buffer)
                        return -ENOMEM;
                mutex_lock(&ar_sdio->dma_buffer_mutex);
                tbuf = ar_sdio->dma_buffer;

                if (request & HIF_WRITE)
                        memcpy(tbuf, buf, len);

                bounced = true;
        } else {
                tbuf = buf;
        }

        ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
        if ((request & HIF_READ) && bounced)
                memcpy(buf, tbuf, len);

        if (bounced)
                mutex_unlock(&ar_sdio->dma_buffer_mutex);

        return ret;
}

static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
                                      struct bus_request *req)
{
        if (req->scat_req) {
                ath6kl_sdio_scat_rw(ar_sdio, req);
        } else {
                void *context;
                int status;

                status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
                                                     req->buffer, req->length,
                                                     req->request);
                context = req->packet;
                ath6kl_sdio_free_bus_req(ar_sdio, req);
                ath6kl_hif_rw_comp_handler(context, status);
        }
}

static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
        struct ath6kl_sdio *ar_sdio;
        struct bus_request *req, *tmp_req;

        ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

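        /*
         * Drop the queue lock around each request: processing goes
         * through ath6kl_sdio_scat_rw() or ath6kl_sdio_read_write_sync(),
         * both of which do blocking SDIO I/O and may sleep.
         */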
        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                __ath6kl_sdio_write_async(ar_sdio, req);
                spin_lock_bh(&ar_sdio->wr_async_lock);
        }
        spin_unlock_bh(&ar_sdio->wr_async_lock);
}

static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
        int status;
        struct ath6kl_sdio *ar_sdio;

        ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

        ar_sdio = sdio_get_drvdata(func);
        atomic_set(&ar_sdio->irq_handling, 1);
        /*
         * Release the host during interrupts so we can pick it back up when
         * we process commands.
         */
        sdio_release_host(ar_sdio->func);

        status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
        sdio_claim_host(ar_sdio->func);

        atomic_set(&ar_sdio->irq_handling, 0);
        wake_up(&ar_sdio->irq_wq);

        WARN_ON(status && status != -ECANCELED);
}

static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret = 0;

        if (!ar_sdio->is_disabled)
                return 0;

        ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

        sdio_claim_host(func);

        ret = sdio_enable_func(func);
        if (ret) {
                ath6kl_err("Unable to enable sdio func: %d\n", ret);
                sdio_release_host(func);
                return ret;
        }

        sdio_release_host(func);

        /*
         * Wait for hardware to initialise. It should take a lot less than
         * 10 ms but let's be conservative here.
         */
        msleep(10);

        ar_sdio->is_disabled = false;

        return ret;
}

static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        if (ar_sdio->is_disabled)
                return 0;

        ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

        /* Disable the card */
        sdio_claim_host(ar_sdio->func);
        ret = sdio_disable_func(ar_sdio->func);
        sdio_release_host(ar_sdio->func);

        if (ret)
                return ret;

        ar_sdio->is_disabled = true;

        return ret;
}

static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
                                   u32 length, u32 request,
                                   struct htc_packet *packet)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *bus_req;

        bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);

        if (WARN_ON_ONCE(!bus_req))
                return -ENOMEM;

        bus_req->address = address;
        bus_req->buffer = buffer;
        bus_req->length = length;
        bus_req->request = request;
        bus_req->packet = packet;

        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
        spin_unlock_bh(&ar_sdio->wr_async_lock);
        queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

        return 0;
}

static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        sdio_claim_host(ar_sdio->func);

        /* Register the isr */
        ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
        if (ret)
                ath6kl_err("Failed to claim sdio irq: %d\n", ret);

        sdio_release_host(ar_sdio->func);
}

static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

        return !atomic_read(&ar_sdio->irq_handling);
}

static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;

        sdio_claim_host(ar_sdio->func);

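        /*
         * If the interrupt handler is still running it has dropped the
         * host lock (see ath6kl_sdio_irq_handler()), so release the host
         * here as well and wait for the handler to finish before
         * releasing the irq.
         */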
        if (atomic_read(&ar_sdio->irq_handling)) {
                sdio_release_host(ar_sdio->func);

                ret = wait_event_interruptible(ar_sdio->irq_wq,
                                               ath6kl_sdio_is_on_irq(ar));
                if (ret)
                        return;

                sdio_claim_host(ar_sdio->func);
        }

        ret = sdio_release_irq(ar_sdio->func);
        if (ret)
                ath6kl_err("Failed to release sdio irq: %d\n", ret);

        sdio_release_host(ar_sdio->func);
}

static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *node = NULL;

        spin_lock_bh(&ar_sdio->scat_lock);

        if (!list_empty(&ar_sdio->scat_req)) {
                node = list_first_entry(&ar_sdio->scat_req,
                                        struct hif_scatter_req, list);
                list_del(&node->list);

                node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
        }

        spin_unlock_bh(&ar_sdio->scat_lock);

        return node;
}

static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
                                        struct hif_scatter_req *s_req)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

        spin_lock_bh(&ar_sdio->scat_lock);

        list_add_tail(&s_req->list, &ar_sdio->scat_req);

        spin_unlock_bh(&ar_sdio->scat_lock);
}

/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
                                        struct hif_scatter_req *scat_req)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u32 request = scat_req->req;
        int status = 0;

        if (!scat_req->len)
                return -EINVAL;

        ath6kl_dbg(ATH6KL_DBG_SCATTER,
                   "hif-scatter: total len: %d scatter entries: %d\n",
                   scat_req->len, scat_req->scat_entries);

        if (request & HIF_SYNCHRONOUS) {
                status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
        } else {
                spin_lock_bh(&ar_sdio->wr_async_lock);
                list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
        }

        return status;
}

/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *s_req, *tmp_req;

        /* empty the free list */
        spin_lock_bh(&ar_sdio->scat_lock);
        list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
                list_del(&s_req->list);
                spin_unlock_bh(&ar_sdio->scat_lock);

                /*
                 * FIXME: should we also call completion handler with
                 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
                 * that the packet is properly freed?
                 */
                if (s_req->busrequest)
                        ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
                kfree(s_req->virt_dma_buf);
                kfree(s_req->sgentries);
                kfree(s_req);

                spin_lock_bh(&ar_sdio->scat_lock);
        }
        spin_unlock_bh(&ar_sdio->scat_lock);
}

/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct htc_target *target = ar->htc_target;
        int ret = 0;
        bool virt_scat = false;

        if (ar_sdio->scatter_enabled)
                return 0;

        ar_sdio->scatter_enabled = true;

        /* check if host supports scatter and it meets our requirements */
        if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
                ath6kl_err("host only supports scatter of %d entries, need %d\n",
                           ar_sdio->func->card->host->max_segs,
                           MAX_SCATTER_ENTRIES_PER_REQ);
                virt_scat = true;
        }

        if (!virt_scat) {
                ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
                                MAX_SCATTER_ENTRIES_PER_REQ,
                                MAX_SCATTER_REQUESTS, virt_scat);

                if (!ret) {
                        ath6kl_dbg(ATH6KL_DBG_BOOT,
                                   "hif-scatter enabled requests %d entries %d\n",
                                   MAX_SCATTER_REQUESTS,
                                   MAX_SCATTER_ENTRIES_PER_REQ);

                        target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
                        target->max_xfer_szper_scatreq =
                                                MAX_SCATTER_REQ_TRANSFER_SIZE;
                } else {
                        ath6kl_sdio_cleanup_scatter(ar);
                        ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
                }
        }

        if (virt_scat || ret) {
                ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
                                ATH6KL_SCATTER_ENTRIES_PER_REQ,
                                ATH6KL_SCATTER_REQS, virt_scat);

                if (ret) {
                        ath6kl_err("failed to alloc virtual scatter resources!\n");
                        ath6kl_sdio_cleanup_scatter(ar);
                        return ret;
                }

                ath6kl_dbg(ATH6KL_DBG_BOOT,
                           "virtual scatter enabled requests %d entries %d\n",
                           ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

                target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
                target->max_xfer_szper_scatreq =
                                        ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
        }

        return 0;
}

static int ath6kl_sdio_config(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);

        if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
            MANUFACTURER_ID_AR6003_BASE) {
                /* enable 4-bit ASYNC interrupt on AR6003 or later */
                ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
                                                CCCR_SDIO_IRQ_MODE_REG,
                                                SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
                if (ret) {
                        ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
                                   ret);
                        goto out;
                }

                ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
        }

        /* give us some time to enable, in ms */
        func->enable_timeout = 100;

        ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
        if (ret) {
                ath6kl_err("Set sdio block size %d failed: %d\n",
                           HIF_MBOX_BLOCK_SIZE, ret);
                goto out;
        }

out:
        sdio_release_host(func);

        return ret;
}

static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        mmc_pm_flag_t flags;
        int ret;

        flags = sdio_get_host_pm_caps(func);

        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

        if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
            !(flags & MMC_PM_KEEP_POWER))
                return -EINVAL;

        ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
        if (ret) {
                ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
                return ret;
        }

        /* sdio irq wakes up host */
        ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
        if (ret)
                ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

        return ret;
}

static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        mmc_pm_flag_t flags;
        bool try_deepsleep = false;
        int ret;

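        /*
         * Suspend ladder: try WoW first when it is configured or a wow
         * pattern was passed in, fall back to deep sleep if WoW fails and
         * the configuration allows it, and cut power as the last resort.
         */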
        if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
            (!ar->suspend_mode && wow)) {
                ret = ath6kl_set_sdio_pm_caps(ar);
                if (ret)
                        goto cut_pwr;

                ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
                if (ret && ret != -ENOTCONN)
                        ath6kl_err("wow suspend failed: %d\n", ret);

                if (ret &&
                    (!ar->wow_suspend_mode ||
                     ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
                        try_deepsleep = true;
                else if (ret &&
                         ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
                        goto cut_pwr;
                if (!ret)
                        return 0;
        }

        if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
            !ar->suspend_mode || try_deepsleep) {
                flags = sdio_get_host_pm_caps(func);
                if (!(flags & MMC_PM_KEEP_POWER))
                        goto cut_pwr;

                ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
                if (ret)
                        goto cut_pwr;

                /*
                 * Workaround to support Deep Sleep with MSM: set the
                 * MMC_PM_WAKE_SDIO_IRQ host pm flag, which allows the SDCC
                 * driver to disable the sdc2_clock and internally lets the
                 * MSM enter TCXO shutdown properly.
                 */
                if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
                        ret = sdio_set_host_pm_flags(func,
                                                MMC_PM_WAKE_SDIO_IRQ);
                        if (ret)
                                goto cut_pwr;
                }

                ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
                                              NULL);
                if (ret)
                        goto cut_pwr;

                return 0;
        }

cut_pwr:
        if (func->card && func->card->host)
                func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;

        return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}

static int ath6kl_sdio_resume(struct ath6kl *ar)
{
        switch (ar->state) {
        case ATH6KL_STATE_OFF:
        case ATH6KL_STATE_CUTPOWER:
                ath6kl_dbg(ATH6KL_DBG_SUSPEND,
                           "sdio resume configuring sdio\n");

                /* need to set sdio settings after power is cut from sdio */
                ath6kl_sdio_config(ar);
                break;

        case ATH6KL_STATE_ON:
                break;

        case ATH6KL_STATE_DEEPSLEEP:
                break;

        case ATH6KL_STATE_WOW:
                break;

        case ATH6KL_STATE_SUSPENDING:
                break;

        case ATH6KL_STATE_RESUMING:
                break;

        case ATH6KL_STATE_RECOVERY:
                break;
        }

        ath6kl_cfg80211_resume(ar);

        return 0;
}

/* set the window address register (using 4-byte register access). */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
        int status;
        u8 addr_val[4];
        s32 i;

        /*
         * Write bytes 1,2,3 of the register to set the upper address bytes,
         * the LSB is written last to initiate the access cycle
         */
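        /*
         * For example, on a little-endian host, writing addr 0xAABBCCDD
         * sends 0xCC to reg_addr + 1, 0xBB to reg_addr + 2 and 0xAA to
         * reg_addr + 3 (each value repeated in all four bytes of a
         * fixed-address write); the final incrementing 4-byte write below
         * rewrites bytes 1-3 with the same values while the LSB 0xDD
         * starts the access cycle.
         */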

        for (i = 1; i <= 3; i++) {
                /*
                 * Fill the buffer with the address byte value we want to
                 * hit 4 times.
                 */
                memset(addr_val, ((u8 *)&addr)[i], 4);

                /*
                 * Hit each byte of the register address with a 4-byte
                 * write operation to the same address, this is a harmless
                 * operation.
                 */
                status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
                                             4, HIF_WR_SYNC_BYTE_FIX);
                if (status)
                        break;
        }

        if (status) {
                ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
                           __func__, addr, reg_addr);
                return status;
        }

        /*
         * Write the address register again, this time write the whole
         * 4-byte value. The effect here is that the LSB write causes the
         * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
         * effect since we are writing the same values again
         */
        status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
                                     4, HIF_WR_SYNC_BYTE_INC);

        if (status) {
                ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
                           __func__, addr, reg_addr);
                return status;
        }

        return 0;
}

static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
        int status;

        /* set window register to start read cycle */
        status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
                                        address);

        if (status)
                return status;

        /* read the data */
        status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
                                (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to read from window data addr\n",
                           __func__);
                return status;
        }

        return status;
}

static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
                                    __le32 data)
{
        int status;
        u32 val = (__force u32) data;

        /* set write data */
        status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
                                (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
        if (status) {
                ath6kl_err("%s: failed to write 0x%x to window data addr\n",
                           __func__, data);
                return status;
        }

        /* set window register, which starts the write cycle */
        return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
                                      address);
}

static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
        u32 addr;
        unsigned long timeout;
        int ret;

        ar->bmi.cmd_credits = 0;

        /* Read the counter register to get the command credits */
        addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
                /*
                 * Hit the credit counter with a 4-byte access, the first byte
                 * read will hit the counter and cause a decrement, while the
                 * remaining 3 bytes has no effect. The rationale behind this
                 * is to make all HIF accesses 4-byte aligned.
                 */
                ret = ath6kl_sdio_read_write_sync(ar, addr,
                                         (u8 *)&ar->bmi.cmd_credits, 4,
                                         HIF_RD_SYNC_BYTE_INC);
                if (ret) {
                        ath6kl_err("Unable to decrement the command credit count register: %d\n",
                                   ret);
                        return ret;
                }

                /* The counter is only 8 bits.
                 * Ignore anything in the upper 3 bytes
                 */
                ar->bmi.cmd_credits &= 0xFF;
        }

        if (!ar->bmi.cmd_credits) {
                ath6kl_err("bmi communication timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
        unsigned long timeout;
        u32 rx_word = 0;
        int ret = 0;

        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while ((time_before(jiffies, timeout)) && !rx_word) {
                ret = ath6kl_sdio_read_write_sync(ar,
                                        RX_LOOKAHEAD_VALID_ADDRESS,
                                        (u8 *)&rx_word, sizeof(rx_word),
                                        HIF_RD_SYNC_BYTE_INC);
                if (ret) {
                        ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
                        return ret;
                }

                /* all we really want is one bit */
                rx_word &= (1 << ENDPOINT1);
        }

        if (!rx_word) {
                ath6kl_err("bmi_recv_buf FIFO empty\n");
                return -EINVAL;
        }

        return ret;
}

static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
        int ret;
        u32 addr;

        ret = ath6kl_sdio_bmi_credits(ar);
        if (ret)
                return ret;

        addr = ar->mbox_info.htc_addr;

        ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
                                          HIF_WR_SYNC_BYTE_INC);
        if (ret) {
                ath6kl_err("unable to send the bmi data to the device\n");
                return ret;
        }

        return 0;
}

static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
        int ret;
        u32 addr;

        /*
         * During normal bootup, small reads may be required.
         * Rather than issue an HIF Read and then wait as the Target
         * adds successive bytes to the FIFO, we wait here until
         * we know that response data is available.
         *
         * This allows us to cleanly timeout on an unexpected
         * Target failure rather than risk problems at the HIF level.
         * In particular, this avoids SDIO timeouts and possibly garbage
         * data on some host controllers.  And on an interconnect
         * such as Compact Flash (as well as some SDIO masters) which
         * does not provide any indication on data timeout, it avoids
         * a potential hang or garbage response.
         *
         * Synchronization is more difficult for reads larger than the
         * size of the MBOX FIFO (128B), because the Target is unable
         * to push the 129th byte of data until AFTER the Host posts an
         * HIF Read and removes some FIFO data.  So for large reads the
         * Host proceeds to post an HIF Read BEFORE all the data is
         * actually available to read.  Fortunately, large BMI reads do
         * not occur in practice -- they're supported for debug/development.
         *
         * So Host/Target BMI synchronization is divided into these cases:
         *  CASE 1: length < 4
         *        Should not happen
         *
         *  CASE 2: 4 <= length <= 128
         *        Wait for first 4 bytes to be in FIFO
         *        If CONSERVATIVE_BMI_READ is enabled, also wait for
         *        a BMI command credit, which indicates that the ENTIRE
         *        response is available in the FIFO
         *
         *  CASE 3: length > 128
         *        Wait for the first 4 bytes to be in FIFO
         *
         * For most uses, a small timeout should be sufficient and we will
         * usually see a response quickly; but there may be some unusual
         * (debug) cases of BMI_EXECUTE where we want a larger timeout.
         * For now, we use an unbounded busy loop while waiting for
         * BMI_EXECUTE.
         *
         * If BMI_EXECUTE ever needs to support longer-latency execution,
         * especially in production, this code needs to be enhanced to sleep
         * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
         * a function of Host processor speed.
         */
        if (len >= 4) { /* NB: Currently, always true */
                ret = ath6kl_bmi_get_rx_lkahd(ar);
                if (ret)
                        return ret;
        }

        addr = ar->mbox_info.htc_addr;
        ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
                                  HIF_RD_SYNC_BYTE_INC);
        if (ret) {
                ath6kl_err("Unable to read the bmi data from the device: %d\n",
                           ret);
                return ret;
        }

        return 0;
}

static void ath6kl_sdio_stop(struct ath6kl *ar)
{
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *req, *tmp_req;
        void *context;

        /* FIXME: make sure that wq is not queued again */

        cancel_work_sync(&ar_sdio->wr_async_work);

        spin_lock_bh(&ar_sdio->wr_async_lock);

        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);

                if (req->scat_req) {
                        /* this is a scatter gather request */
                        req->scat_req->status = -ECANCELED;
                        req->scat_req->complete(ar_sdio->ar->htc_target,
                                                req->scat_req);
                } else {
                        context = req->packet;
                        ath6kl_sdio_free_bus_req(ar_sdio, req);
                        ath6kl_hif_rw_comp_handler(context, -ECANCELED);
                }
        }

        spin_unlock_bh(&ar_sdio->wr_async_lock);

        WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
        .read_write_sync = ath6kl_sdio_read_write_sync,
        .write_async = ath6kl_sdio_write_async,
        .irq_enable = ath6kl_sdio_irq_enable,
        .irq_disable = ath6kl_sdio_irq_disable,
        .scatter_req_get = ath6kl_sdio_scatter_req_get,
        .scatter_req_add = ath6kl_sdio_scatter_req_add,
        .enable_scatter = ath6kl_sdio_enable_scatter,
        .scat_req_rw = ath6kl_sdio_async_rw_scatter,
        .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
        .suspend = ath6kl_sdio_suspend,
        .resume = ath6kl_sdio_resume,
        .diag_read32 = ath6kl_sdio_diag_read32,
        .diag_write32 = ath6kl_sdio_diag_write32,
        .bmi_read = ath6kl_sdio_bmi_read,
        .bmi_write = ath6kl_sdio_bmi_write,
        .power_on = ath6kl_sdio_power_on,
        .power_off = ath6kl_sdio_power_off,
        .stop = ath6kl_sdio_stop,
};

#ifdef CONFIG_PM_SLEEP

/*
 * Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

        return 0;
}

static int ath6kl_sdio_pm_resume(struct device *device)
{
        ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

        return 0;
}

static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
                         ath6kl_sdio_pm_resume);

#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)

#else

#define ATH6KL_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

static int ath6kl_sdio_probe(struct sdio_func *func,
                             const struct sdio_device_id *id)
{
        int ret;
        struct ath6kl_sdio *ar_sdio;
        struct ath6kl *ar;
        int count;

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
                   func->num, func->vendor, func->device,
                   func->max_blksize, func->cur_blksize);

        ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
        if (!ar_sdio)
                return -ENOMEM;

        ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
        if (!ar_sdio->dma_buffer) {
                ret = -ENOMEM;
                goto err_hif;
        }

        ar_sdio->func = func;
        sdio_set_drvdata(func, ar_sdio);

        ar_sdio->id = id;
        ar_sdio->is_disabled = true;

        spin_lock_init(&ar_sdio->lock);
        spin_lock_init(&ar_sdio->scat_lock);
        spin_lock_init(&ar_sdio->wr_async_lock);
        mutex_init(&ar_sdio->dma_buffer_mutex);

        INIT_LIST_HEAD(&ar_sdio->scat_req);
        INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
        INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

        INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

        init_waitqueue_head(&ar_sdio->irq_wq);

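        /* seed the free list with the statically allocated bus requests */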
        for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
                ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

        ar = ath6kl_core_create(&ar_sdio->func->dev);
        if (!ar) {
                ath6kl_err("Failed to alloc ath6kl core\n");
                ret = -ENOMEM;
                goto err_dma;
        }

        ar_sdio->ar = ar;
        ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
        ar->hif_priv = ar_sdio;
        ar->hif_ops = &ath6kl_sdio_ops;
        ar->bmi.max_data_size = 256;

        ath6kl_sdio_set_mbox_info(ar);

        ret = ath6kl_sdio_config(ar);
        if (ret) {
                ath6kl_err("Failed to config sdio: %d\n", ret);
                goto err_core_alloc;
        }

        ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
        if (ret) {
                ath6kl_err("Failed to init ath6kl core\n");
                goto err_core_alloc;
        }

        return ret;

err_core_alloc:
        ath6kl_core_destroy(ar_sdio->ar);
err_dma:
        kfree(ar_sdio->dma_buffer);
err_hif:
        kfree(ar_sdio);

        return ret;
}

static void ath6kl_sdio_remove(struct sdio_func *func)
{
        struct ath6kl_sdio *ar_sdio;

        ath6kl_dbg(ATH6KL_DBG_BOOT,
                   "sdio removed func %d vendor 0x%x device 0x%x\n",
                   func->num, func->vendor, func->device);

        ar_sdio = sdio_get_drvdata(func);

        ath6kl_stop_txrx(ar_sdio->ar);
        cancel_work_sync(&ar_sdio->wr_async_work);

        ath6kl_core_cleanup(ar_sdio->ar);
        ath6kl_core_destroy(ar_sdio->ar);

        kfree(ar_sdio->dma_buffer);
        kfree(ar_sdio);
}

static const struct sdio_device_id ath6kl_sdio_devices[] = {
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
        {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
        {},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);

static struct sdio_driver ath6kl_sdio_driver = {
        .name = "ath6kl_sdio",
        .id_table = ath6kl_sdio_devices,
        .probe = ath6kl_sdio_probe,
        .remove = ath6kl_sdio_remove,
        .drv.pm = ATH6KL_SDIO_PM_OPS,
};

static int __init ath6kl_sdio_init(void)
{
        int ret;

        ret = sdio_register_driver(&ath6kl_sdio_driver);
        if (ret)
                ath6kl_err("sdio driver registration failed: %d\n", ret);

        return ret;
}

static void __exit ath6kl_sdio_exit(void)
{
        sdio_unregister_driver(&ath6kl_sdio_driver);
}

module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);