linux/drivers/net/wireless/ath/ath10k/sdio.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
 */

#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include <linux/bitfield.h>
#include "core.h"
#include "bmi.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"

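/* Size of the scatter buffer used to fetch a complete rx bundle in one
 * fixed-address read ("vsg" presumably short for virtual scatter gather).
 */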
#define ATH10K_SDIO_VSG_BUF_SIZE        (64 * 1024)

/* inlined helper functions */

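/* Round a transfer length up to a whole number of sdio blocks; block_mask
 * is block_size - 1, so __ALIGN_MASK() yields the padded length.
 */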
static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
                                                   size_t len)
{
        return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
}

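/* On sdio the HIF pipe ids map 1:1 onto the HTC endpoint ids, hence the
 * plain cast.
 */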
static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
{
        return (enum ath10k_htc_ep_id)pipe_id;
}

static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
{
        dev_kfree_skb(pkt->skb);
        pkt->skb = NULL;
        pkt->alloc_len = 0;
        pkt->act_len = 0;
        pkt->trailer_only = false;
}

static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
                                                size_t act_len, size_t full_len,
                                                bool part_of_bundle,
                                                bool last_in_bundle)
{
        pkt->skb = dev_alloc_skb(full_len);
        if (!pkt->skb)
                return -ENOMEM;

        pkt->act_len = act_len;
        pkt->alloc_len = full_len;
        pkt->part_of_bundle = part_of_bundle;
        pkt->last_in_bundle = last_in_bundle;
        pkt->trailer_only = false;

        return 0;
}

static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
{
        bool trailer_only = false;
        struct ath10k_htc_hdr *htc_hdr =
                (struct ath10k_htc_hdr *)pkt->skb->data;
        u16 len = __le16_to_cpu(htc_hdr->len);

        if (len == htc_hdr->trailer_len)
                trailer_only = true;

        return trailer_only;
}

/* sdio/mmc functions */

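/* Compose the 32-bit argument of an sdio CMD52 (IO_RW_DIRECT) transfer as
 * laid out in the SDIO specification: bit 31 is the R/W flag, bits 30:28
 * the function number (left at 0, i.e. function 0, here), bit 27 the RAW
 * (read-after-write) flag, bits 25:9 the register address and bits 7:0 the
 * data to write. Bits 26 and 8 are stuff bits, which this helper sets to 1.
 */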
static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
                                             unsigned int address,
                                             unsigned char val)
{
        *arg = FIELD_PREP(BIT(31), write) |
               FIELD_PREP(BIT(27), raw) |
               FIELD_PREP(BIT(26), 1) |
               FIELD_PREP(GENMASK(25, 9), address) |
               FIELD_PREP(BIT(8), 1) |
               FIELD_PREP(GENMASK(7, 0), val);
}

static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
                                           unsigned int address,
                                           unsigned char byte)
{
        struct mmc_command io_cmd;

        memset(&io_cmd, 0, sizeof(io_cmd));
        ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
        io_cmd.opcode = SD_IO_RW_DIRECT;
        io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

        return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}

static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
                                           unsigned int address,
                                           unsigned char *byte)
{
        struct mmc_command io_cmd;
        int ret;

        memset(&io_cmd, 0, sizeof(io_cmd));
        ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
        io_cmd.opcode = SD_IO_RW_DIRECT;
        io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

        ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
        if (!ret)
                *byte = io_cmd.resp[0];

        return ret;
}

static int ath10k_sdio_config(struct ath10k *ar)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        unsigned char byte, asyncintdelay = 2;
        int ret;

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");

        sdio_claim_host(func);

        byte = 0;
        ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
                                              SDIO_CCCR_DRIVE_STRENGTH,
                                              &byte);

        byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
        byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
                           ATH10K_SDIO_DRIVE_DTSX_TYPE_D);

        ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
                                              SDIO_CCCR_DRIVE_STRENGTH,
                                              byte);

        byte = 0;
        ret = ath10k_sdio_func0_cmd52_rd_byte(
                func->card,
                CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
                &byte);

        byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
                 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
                 CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);

        ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
                                              CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
                                              byte);
        if (ret) {
                ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
                goto out;
        }

        byte = 0;
        ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
                                              CCCR_SDIO_IRQ_MODE_REG_SDIO3,
                                              &byte);

        byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;

        ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
                                              CCCR_SDIO_IRQ_MODE_REG_SDIO3,
                                              byte);
        if (ret) {
                ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
                            ret);
                goto out;
        }

        byte = 0;
        ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
                                              CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
                                              &byte);

        byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
        byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);

        ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
                                              CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
                                              byte);

        /* give us some time to enable, in ms */
        func->enable_timeout = 100;

        ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
        if (ret) {
                ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
                            ar_sdio->mbox_info.block_size, ret);
                goto out;
        }

out:
        sdio_release_host(func);
        return ret;
}

static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);

        sdio_writel(func, val, addr, &ret);
        if (ret) {
                ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
                            val, addr, ret);
                goto out;
        }

        ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
                   addr, val);

out:
        sdio_release_host(func);

        return ret;
}

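/* sdio_writesb() transfers may be backed by DMA, so the value is written
 * from a kmalloc'd bounce buffer rather than from the stack.
 */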
static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        __le32 *buf;
        int ret;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        *buf = cpu_to_le32(val);

        sdio_claim_host(func);

        ret = sdio_writesb(func, addr, buf, sizeof(*buf));
        if (ret) {
                ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
                            val, addr, ret);
                goto out;
        }

        ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
                   addr, val);

out:
        sdio_release_host(func);

        kfree(buf);

        return ret;
}

static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);
        *val = sdio_readl(func, addr, &ret);
        if (ret) {
                ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
                            addr, ret);
                goto out;
        }

        ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
                   addr, *val);

out:
        sdio_release_host(func);

        return ret;
}

static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);

        ret = sdio_memcpy_fromio(func, buf, addr, len);
        if (ret) {
                ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
                            addr, ret);
                goto out;
        }

        ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
                   addr, buf, len);
        ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);

out:
        sdio_release_host(func);

        return ret;
}

static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);

        /* For some reason toio() doesn't have const for the buffer, need
         * an ugly hack to work around that.
         */
        ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
        if (ret) {
                ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
                            addr, ret);
                goto out;
        }

        ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
                   addr, buf, len);
        ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);

out:
        sdio_release_host(func);

        return ret;
}

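/* Reads from the fixed (FIFO) mbox address are done in whole sdio blocks,
 * so the requested length is rounded down to a block multiple before
 * calling sdio_readsb().
 */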
static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret;

        sdio_claim_host(func);

        len = round_down(len, ar_sdio->mbox_info.block_size);

        ret = sdio_readsb(func, buf, addr, len);
        if (ret) {
                ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
                            addr, ret);
                goto out;
        }

        ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
                   addr, buf, len);
        ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);

out:
        sdio_release_host(func);

        return ret;
}

/* HIF mbox functions */

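/* Strip the HTC header from a received packet. If the header flags a
 * trailer, let HTC parse it for credit reports and lookaheads before the
 * trailer is trimmed off the skb.
 */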
static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
                                              struct ath10k_sdio_rx_data *pkt,
                                              u32 *lookaheads,
                                              int *n_lookaheads)
{
        struct ath10k_htc *htc = &ar->htc;
        struct sk_buff *skb = pkt->skb;
        struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
        bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
        enum ath10k_htc_ep_id eid;
        u8 *trailer;
        int ret;

        if (trailer_present) {
                trailer = skb->data + skb->len - htc_hdr->trailer_len;

                eid = pipe_id_to_eid(htc_hdr->eid);

                ret = ath10k_htc_process_trailer(htc,
                                                 trailer,
                                                 htc_hdr->trailer_len,
                                                 eid,
                                                 lookaheads,
                                                 n_lookaheads);
                if (ret)
                        return ret;

                if (is_trailer_only_msg(pkt))
                        pkt->trailer_only = true;

                skb_trim(skb, skb->len - htc_hdr->trailer_len);
        }

        skb_pull(skb, sizeof(*htc_hdr));

        return 0;
}

static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
                                               u32 lookaheads[],
                                               int *n_lookahead)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_htc *htc = &ar->htc;
        struct ath10k_sdio_rx_data *pkt;
        struct ath10k_htc_ep *ep;
        struct ath10k_skb_rxcb *cb;
        enum ath10k_htc_ep_id id;
        int ret, i, *n_lookahead_local;
        u32 *lookaheads_local;
        int lookahead_idx = 0;

        for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
                lookaheads_local = lookaheads;
                n_lookahead_local = n_lookahead;

                id = ((struct ath10k_htc_hdr *)
                      &lookaheads[lookahead_idx++])->eid;

                if (id >= ATH10K_HTC_EP_COUNT) {
                        ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
                                    id);
                        ret = -ENOMEM;
                        goto out;
                }

                ep = &htc->endpoint[id];

                if (ep->service_id == 0) {
                        ath10k_warn(ar, "ep %d is not connected\n", id);
                        ret = -ENOMEM;
                        goto out;
                }

                pkt = &ar_sdio->rx_pkts[i];

                if (pkt->part_of_bundle && !pkt->last_in_bundle) {
                        /* Only read lookaheads from RX trailers
                         * for the last packet in a bundle.
                         */
                        lookahead_idx--;
                        lookaheads_local = NULL;
                        n_lookahead_local = NULL;
                }

                ret = ath10k_sdio_mbox_rx_process_packet(ar,
                                                         pkt,
                                                         lookaheads_local,
                                                         n_lookahead_local);
                if (ret)
                        goto out;

                if (!pkt->trailer_only) {
                        cb = ATH10K_SKB_RXCB(pkt->skb);
                        cb->eid = id;

                        skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
                        queue_work(ar->workqueue_aux,
                                   &ar_sdio->async_work_rx);
                } else {
                        kfree_skb(pkt->skb);
                }

                /* The RX complete handler now owns the skb...*/
                pkt->skb = NULL;
                pkt->alloc_len = 0;
        }

        ret = 0;

out:
        /* Free all packets that were not passed on to the RX completion
         * handler...
         */
        for (; i < ar_sdio->n_rx_pkts; i++)
                ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

        return ret;
}

static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
                                         struct ath10k_sdio_rx_data *rx_pkts,
                                         struct ath10k_htc_hdr *htc_hdr,
                                         size_t full_len, size_t act_len,
                                         size_t *bndl_cnt)
{
        int ret, i;
        u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;

        *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);

        if (*bndl_cnt > max_msgs) {
                ath10k_warn(ar,
                            "HTC bundle length %u exceeds maximum %u\n",
                            le16_to_cpu(htc_hdr->len),
                            max_msgs);
                return -ENOMEM;
        }

        /* Allocate bndl_cnt extra skbs for the bundle.
         * The packet containing the
         * ATH10K_HTC_FLAG_BUNDLE_MASK flag is not included
         * in bndl_cnt. The skb for that packet will be
         * allocated separately.
         */
        for (i = 0; i < *bndl_cnt; i++) {
                ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
                                                    act_len,
                                                    full_len,
                                                    true,
                                                    false);
                if (ret)
                        return ret;
        }

        return 0;
}

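/* Allocate one rx skb per pending message announced by the lookaheads,
 * expanding a bundle lookahead into the extra packets indicated by its HTC
 * header flags.
 */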
static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
                                     u32 lookaheads[], int n_lookaheads)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_htc_hdr *htc_hdr;
        size_t full_len, act_len;
        bool last_in_bundle;
        int ret, i;
        int pkt_cnt = 0;

        if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
                ath10k_warn(ar, "the total number of packets to be fetched (%u) exceeds maximum %u\n",
                            n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
                ret = -ENOMEM;
                goto err;
        }

        for (i = 0; i < n_lookaheads; i++) {
                htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
                last_in_bundle = false;

                if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
                        ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
                                    le16_to_cpu(htc_hdr->len),
                                    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
                        ret = -ENOMEM;
                        goto err;
                }

                act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
                full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);

                if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
                        ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
                                    htc_hdr->eid, htc_hdr->flags,
                                    le16_to_cpu(htc_hdr->len));
                        ret = -EINVAL;
                        goto err;
                }

                if (ath10k_htc_get_bundle_count(
                        ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
                        /* HTC header indicates that every packet to follow
                         * has the same padded length so that it can be
                         * optimally fetched as a full bundle.
                         */
                        size_t bndl_cnt;

                        ret = ath10k_sdio_mbox_alloc_bundle(ar,
                                                            &ar_sdio->rx_pkts[pkt_cnt],
                                                            htc_hdr,
                                                            full_len,
                                                            act_len,
                                                            &bndl_cnt);

                        if (ret) {
                                ath10k_warn(ar, "failed to allocate a bundle: %d\n",
                                            ret);
                                goto err;
                        }

                        pkt_cnt += bndl_cnt;

                        /* next buffer will be the last in the bundle */
                        last_in_bundle = true;
                }

                /* Allocate skb for packet. If the packet had the
                 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
                 * packet skbs have been allocated in the previous step.
                 */
                if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
                        full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;

                ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
                                                    act_len,
                                                    full_len,
                                                    last_in_bundle,
                                                    last_in_bundle);
                if (ret) {
                        ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
                        goto err;
                }

                pkt_cnt++;
        }

        ar_sdio->n_rx_pkts = pkt_cnt;

        return 0;

err:
        for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
                if (!ar_sdio->rx_pkts[i].alloc_len)
                        break;
                ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
        }

        return ret;
}

static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
        struct sk_buff *skb = pkt->skb;
        struct ath10k_htc_hdr *htc_hdr;
        int ret;

        ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
                                 skb->data, pkt->alloc_len);
        if (ret)
                goto err;

        htc_hdr = (struct ath10k_htc_hdr *)skb->data;
        pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

        if (pkt->act_len > pkt->alloc_len) {
                ret = -EINVAL;
                goto err;
        }

        skb_put(skb, pkt->act_len);
        return 0;

err:
        ar_sdio->n_rx_pkts = 0;
        ath10k_sdio_mbox_free_rx_pkt(pkt);

        return ret;
}

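/* Fetch all packets of a bundle with a single fixed-address read into the
 * vsg buffer and then copy each packet into its pre-allocated skb.
 */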
static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_sdio_rx_data *pkt;
        struct ath10k_htc_hdr *htc_hdr;
        int ret, i;
        u32 pkt_offset, virt_pkt_len;

        virt_pkt_len = 0;
        for (i = 0; i < ar_sdio->n_rx_pkts; i++)
                virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;

        if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
                ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
                ret = -E2BIG;
                goto err;
        }

        ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
                                 ar_sdio->vsg_buffer, virt_pkt_len);
        if (ret) {
                ath10k_warn(ar, "failed to read bundle packets: %d\n", ret);
                goto err;
        }

        pkt_offset = 0;
        for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
                pkt = &ar_sdio->rx_pkts[i];
                htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
                pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);

                if (pkt->act_len > pkt->alloc_len) {
                        ret = -EINVAL;
                        goto err;
                }

                skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
                pkt_offset += pkt->alloc_len;
        }

        return 0;

err:
        /* Free all packets that were not successfully fetched. */
        for (i = 0; i < ar_sdio->n_rx_pkts; i++)
                ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);

        ar_sdio->n_rx_pkts = 0;

        return ret;
}

/* This is the timeout for mailbox processing done in the sdio irq
 * handler. The timeout is deliberately set quite high since SDIO dump logs
 * over serial port can/will add a substantial overhead to the processing
 * (if enabled).
 */
#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)

static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
                                                  u32 msg_lookahead, bool *done)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
        int n_lookaheads = 1;
        unsigned long timeout;
        int ret;

        *done = true;

        /* Copy the lookahead obtained from the HTC register table into our
         * temp array as a start value.
         */
        lookaheads[0] = msg_lookahead;

        timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
        do {
                /* Try to allocate as many HTC RX packets as indicated by
                 * n_lookaheads.
                 */
                ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
                                                n_lookaheads);
                if (ret)
                        break;

                if (ar_sdio->n_rx_pkts >= 2)
                        /* A recv bundle was detected, force an IRQ status
                         * re-check.
                         */
                        *done = false;

                if (ar_sdio->n_rx_pkts > 1)
                        ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
                else
                        ret = ath10k_sdio_mbox_rx_fetch(ar);

                /* Process fetched packets. This will potentially update
                 * n_lookaheads depending on whether the packets contain
                 * lookahead reports.
                 */
                n_lookaheads = 0;
                ret = ath10k_sdio_mbox_rx_process_packets(ar,
                                                          lookaheads,
                                                          &n_lookaheads);

                if (!n_lookaheads || ret)
                        break;

                /* For SYNCH processing, if we get here, we are running
                 * through the loop again due to updated lookaheads. Set
                 * flag that we should re-check IRQ status registers again
                 * before leaving IRQ processing, this can net better
                 * performance in high throughput situations.
                 */
                *done = false;
        } while (time_before(jiffies, timeout));

        if (ret && (ret != -ECANCELED))
                ath10k_warn(ar, "failed to get pending recv messages: %d\n",
                            ret);

        return ret;
}

static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
{
        u32 val;
        int ret;

        /* TODO: Add firmware crash handling */
        ath10k_warn(ar, "firmware crashed\n");

        /* read counter to clear the interrupt, the debug error interrupt is
         * counter 0.
         */
        ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
        if (ret)
                ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);

        return ret;
}

static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
        u8 counter_int_status;
        int ret;

        mutex_lock(&irq_data->mtx);
        counter_int_status = irq_data->irq_proc_reg->counter_int_status &
                             irq_data->irq_en_reg->cntr_int_status_en;

        /* NOTE: other modules like GMBOX may use the counter interrupt for
         * credit flow control on other counters, we only need to check for
         * the debug assertion counter interrupt.
         */
        if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
                ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
        else
                ret = 0;

        mutex_unlock(&irq_data->mtx);

        return ret;
}

static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
        u8 error_int_status;
        int ret;

        ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");

        error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
        if (!error_int_status) {
                ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
                            error_int_status);
                return -EIO;
        }

        ath10k_dbg(ar, ATH10K_DBG_SDIO,
                   "sdio error_int_status 0x%x\n", error_int_status);

        if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
                      error_int_status))
                ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");

        if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
                      error_int_status))
                ath10k_warn(ar, "rx underflow interrupt error\n");

        if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
                      error_int_status))
                ath10k_warn(ar, "tx overflow interrupt error\n");

        /* Clear the interrupt */
        irq_data->irq_proc_reg->error_int_status &= ~error_int_status;

        /* set W1C value to clear the interrupt, this hits the register first */
        ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
                                    error_int_status);
        if (ret) {
                ath10k_warn(ar, "unable to write to error int status address: %d\n",
                            ret);
                return ret;
        }

        return 0;
}

static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
        u8 cpu_int_status;
        int ret;

        mutex_lock(&irq_data->mtx);
        cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
                         irq_data->irq_en_reg->cpu_int_status_en;
        if (!cpu_int_status) {
                ath10k_warn(ar, "CPU interrupt status is zero\n");
                ret = -EIO;
                goto out;
        }

        /* Clear the interrupt */
        irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;

        /* Set up the register transfer buffer to hit the register 4 times,
         * this is done to make the access 4-byte aligned to mitigate issues
         * with host bus interconnects that restrict bus transfer lengths to
         * be a multiple of 4 bytes.
         *
         * Set W1C value to clear the interrupt, this hits the register first.
         */
        ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
                                    cpu_int_status);
        if (ret) {
                ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
                            ret);
                goto out;
        }

out:
        mutex_unlock(&irq_data->mtx);
        if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK) {
                ath10k_err(ar, "firmware crashed!\n");
                queue_work(ar->workqueue, &ar->restart_work);
        }
        return ret;
}

static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
                                            u8 *host_int_status,
                                            u32 *lookahead)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
        struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
        struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
        u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
        int ret;

        mutex_lock(&irq_data->mtx);

        *lookahead = 0;
        *host_int_status = 0;

        /* int_status_en is supposed to be non zero, otherwise interrupts
         * shouldn't be enabled. There is however a short time frame during
         * initialization between irq registration and the int_status_en
         * setup where this can happen.
         * We silently ignore this condition.
         */
        if (!irq_en_reg->int_status_en) {
                ret = 0;
                goto out;
        }

        /* Read the first sizeof(struct ath10k_sdio_irq_proc_regs)
         * bytes of the HTC register table. This
         * will yield us the value of different int status
         * registers and the lookahead registers.
         */
        ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
                               irq_proc_reg, sizeof(*irq_proc_reg));
        if (ret)
                goto out;

        /* Update only those registers that are enabled */
        *host_int_status = irq_proc_reg->host_int_status &
                           irq_en_reg->int_status_en;

        /* Look at mbox status */
        if (!(*host_int_status & htc_mbox)) {
                *lookahead = 0;
                ret = 0;
                goto out;
        }

        /* Mask out pending mbox value, we use look ahead as
         * the real flag for mbox processing.
         */
        *host_int_status &= ~htc_mbox;
        if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
                *lookahead = le32_to_cpu(
                        irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
                if (!*lookahead)
                        ath10k_warn(ar, "sdio mbox lookahead is zero\n");
        }

out:
        mutex_unlock(&irq_data->mtx);
        return ret;
}

static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
                                              bool *done)
{
        u8 host_int_status;
        u32 lookahead;
        int ret;

        /* NOTE: HIF implementation guarantees that the context of this
         * call allows us to perform SYNCHRONOUS I/O, that is we can block,
         * sleep or call any API that can block or switch thread/task
         * contexts. This is a fully schedulable context.
         */

        ret = ath10k_sdio_mbox_read_int_status(ar,
                                               &host_int_status,
                                               &lookahead);
        if (ret) {
                *done = true;
                goto out;
        }

        if (!host_int_status && !lookahead) {
                ret = 0;
                *done = true;
                goto out;
        }

        if (lookahead) {
                ath10k_dbg(ar, ATH10K_DBG_SDIO,
                           "sdio pending mailbox msg lookahead 0x%08x\n",
                           lookahead);

                ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
                                                             lookahead,
                                                             done);
                if (ret)
                        goto out;
        }

        /* now, handle the rest of the interrupts */
        ath10k_dbg(ar, ATH10K_DBG_SDIO,
                   "sdio host_int_status 0x%x\n", host_int_status);

        if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
                /* CPU Interrupt */
                ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
                if (ret)
                        goto out;
        }

        if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
                /* Error Interrupt */
                ret = ath10k_sdio_mbox_proc_err_intr(ar);
                if (ret)
                        goto out;
        }

        if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
                /* Counter Interrupt */
                ret = ath10k_sdio_mbox_proc_counter_intr(ar);

        ret = 0;

out:
        /* An optimization to bypass reading the IRQ status registers
         * unnecessarily, which can re-wake the target: if upper layers
         * determine that we are in a low-throughput mode, we can rely on
         * taking another interrupt rather than re-checking the status
         * registers.
         *
         * NOTE: host interfaces that detect pending mbox messages at the
         * HIF level can not use this optimization due to possible side
         * effects; SPI requires the host to drain all messages from the
         * mailbox before exiting the ISR routine.
         */

        ath10k_dbg(ar, ATH10K_DBG_SDIO,
                   "sdio pending irqs done %d status %d\n",
                   *done, ret);

        return ret;
}

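/* Derive the mailbox layout from compile-time constants and pick the
 * extended mailbox 0 size based on the chip revision encoded in the sdio
 * device id.
 */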
static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
        u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;

        mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
        mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
        mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
        mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
        mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;

        mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;

        dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
        dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
        switch (dev_id_base) {
        case QCA_MANUFACTURER_ID_AR6005_BASE:
                if (dev_id_chiprev < 4)
                        mbox_info->ext_info[0].htc_ext_sz =
                                ATH10K_HIF_MBOX0_EXT_WIDTH;
                else
                        /* from QCA6174 2.0(0x504), the width has been extended
                         * to 56K
                         */
                        mbox_info->ext_info[0].htc_ext_sz =
                                ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
                break;
        case QCA_MANUFACTURER_ID_QCA9377_BASE:
                mbox_info->ext_info[0].htc_ext_sz =
                        ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
                break;
        default:
                mbox_info->ext_info[0].htc_ext_sz =
                                ATH10K_HIF_MBOX0_EXT_WIDTH;
        }

        mbox_info->ext_info[1].htc_ext_addr =
                mbox_info->ext_info[0].htc_ext_addr +
                mbox_info->ext_info[0].htc_ext_sz +
                ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
        mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
}

/* BMI functions */

static int ath10k_sdio_bmi_credits(struct ath10k *ar)
{
        u32 addr, cmd_credits;
        unsigned long timeout;
        int ret;

        /* Read the counter register to get the command credits */
        addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
        timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
        cmd_credits = 0;

        while (time_before(jiffies, timeout) && !cmd_credits) {
                /* Hit the credit counter with a 4-byte access, the first byte
                 * read will hit the counter and cause a decrement, while the
                 * remaining 3 bytes have no effect. The rationale behind this
                 * is to make all HIF accesses 4-byte aligned.
                 */
                ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
                if (ret) {
                        ath10k_warn(ar,
                                    "unable to decrement the command credit count register: %d\n",
                                    ret);
                        return ret;
                }

                /* The counter is only 8 bits.
                 * Ignore anything in the upper 3 bytes
                 */
                cmd_credits &= 0xFF;
        }

        if (!cmd_credits) {
                ath10k_warn(ar, "bmi communication timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
{
        unsigned long timeout;
        u32 rx_word;
        int ret;

        timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
        rx_word = 0;

        while ((time_before(jiffies, timeout)) && !rx_word) {
                ret = ath10k_sdio_read32(ar,
                                         MBOX_HOST_INT_STATUS_ADDRESS,
                                         &rx_word);
                if (ret) {
                        ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
                        return ret;
                }

                /* all we really want is one bit */
                rx_word &= 1;
        }

        if (!rx_word) {
                ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
                return -EINVAL;
        }

        return ret;
}

static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
                                        void *req, u32 req_len,
                                        void *resp, u32 *resp_len)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        u32 addr;
        int ret;

        if (req) {
                ret = ath10k_sdio_bmi_credits(ar);
                if (ret)
                        return ret;

                addr = ar_sdio->mbox_info.htc_addr;

                memcpy(ar_sdio->bmi_buf, req, req_len);
                ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
                if (ret) {
                        ath10k_warn(ar,
                                    "unable to send the bmi data to the device: %d\n",
                                    ret);
                        return ret;
                }
        }

        if (!resp || !resp_len)
                /* No response expected */
                return 0;

        /* During normal bootup, small reads may be required.
         * Rather than issue an HIF Read and then wait as the Target
         * adds successive bytes to the FIFO, we wait here until
         * we know that response data is available.
         *
         * This allows us to cleanly timeout on an unexpected
         * Target failure rather than risk problems at the HIF level.
         * In particular, this avoids SDIO timeouts and possibly garbage
         * data on some host controllers.  And on an interconnect
         * such as Compact Flash (as well as some SDIO masters) which
         * does not provide any indication on data timeout, it avoids
         * a potential hang or garbage response.
         *
         * Synchronization is more difficult for reads larger than the
         * size of the MBOX FIFO (128B), because the Target is unable
         * to push the 129th byte of data until AFTER the Host posts an
         * HIF Read and removes some FIFO data.  So for large reads the
         * Host proceeds to post an HIF Read BEFORE all the data is
         * actually available to read.  Fortunately, large BMI reads do
         * not occur in practice -- they're supported for debug/development.
         *
         * So Host/Target BMI synchronization is divided into these cases:
         *  CASE 1: length < 4
         *        Should not happen
         *
         *  CASE 2: 4 <= length <= 128
         *        Wait for first 4 bytes to be in FIFO
         *        If CONSERVATIVE_BMI_READ is enabled, also wait for
         *        a BMI command credit, which indicates that the ENTIRE
         *        response is available in the FIFO
         *
         *  CASE 3: length > 128
         *        Wait for the first 4 bytes to be in FIFO
         *
         * For most uses, a small timeout should be sufficient and we will
         * usually see a response quickly; but there may be some unusual
         * (debug) cases of BMI_EXECUTE where we want a larger timeout.
         * For now, we use an unbounded busy loop while waiting for
         * BMI_EXECUTE.
         *
         * If BMI_EXECUTE ever needs to support longer-latency execution,
         * especially in production, this code needs to be enhanced to sleep
         * and yield.  Also note that BMI_COMMUNICATION_TIMEOUT is currently
         * a function of Host processor speed.
         */
        ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
        if (ret)
                return ret;

        /* We always read from the start of the mbox address */
        addr = ar_sdio->mbox_info.htc_addr;
        ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
        if (ret) {
                ath10k_warn(ar,
                            "unable to read the bmi data from the device: %d\n",
                            ret);
                return ret;
        }

        memcpy(resp, ar_sdio->bmi_buf, *resp_len);

        return 0;
}

/* sdio async handling functions */

static struct ath10k_sdio_bus_request
*ath10k_sdio_alloc_busreq(struct ath10k *ar)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_sdio_bus_request *bus_req;

        spin_lock_bh(&ar_sdio->lock);

        if (list_empty(&ar_sdio->bus_req_freeq)) {
                bus_req = NULL;
                goto out;
        }

        bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
                                   struct ath10k_sdio_bus_request, list);
        list_del(&bus_req->list);

out:
        spin_unlock_bh(&ar_sdio->lock);
        return bus_req;
}

static void ath10k_sdio_free_bus_req(struct ath10k *ar,
                                     struct ath10k_sdio_bus_request *bus_req)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);

        memset(bus_req, 0, sizeof(*bus_req));

        spin_lock_bh(&ar_sdio->lock);
        list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
        spin_unlock_bh(&ar_sdio->lock);
}

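/* Perform a single queued write and complete the request, either towards
 * HTC for HTC messages or via the requester's completion.
 */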
static void __ath10k_sdio_write_async(struct ath10k *ar,
                                      struct ath10k_sdio_bus_request *req)
{
        struct ath10k_htc_ep *ep;
        struct sk_buff *skb;
        int ret;

        skb = req->skb;
        ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
        if (ret)
                ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d\n",
                            req->address, ret);

        if (req->htc_msg) {
                ep = &ar->htc.endpoint[req->eid];
                ath10k_htc_notify_tx_completion(ep, skb);
        } else if (req->comp) {
                complete(req->comp);
        }

        ath10k_sdio_free_bus_req(ar, req);
}

/* To improve throughput, use a workqueue to deliver packets to the HTC
 * layer; this way the SDIO bus is utilised much better.
 */
static void ath10k_rx_indication_async_work(struct work_struct *work)
{
        struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
                                                   async_work_rx);
        struct ath10k *ar = ar_sdio->ar;
        struct ath10k_htc_ep *ep;
        struct ath10k_skb_rxcb *cb;
        struct sk_buff *skb;

        while (true) {
                skb = skb_dequeue(&ar_sdio->rx_head);
                if (!skb)
                        break;
                cb = ATH10K_SKB_RXCB(skb);
                ep = &ar->htc.endpoint[cb->eid];
                ep->ep_ops.ep_rx_complete(ar, skb);
        }

        if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
                napi_schedule(&ar->napi);
}

static void ath10k_sdio_write_async_work(struct work_struct *work)
{
        struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
                                                   wr_async_work);
        struct ath10k *ar = ar_sdio->ar;
        struct ath10k_sdio_bus_request *req, *tmp_req;

        spin_lock_bh(&ar_sdio->wr_async_lock);

        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
                __ath10k_sdio_write_async(ar, req);
                spin_lock_bh(&ar_sdio->wr_async_lock);
        }

        spin_unlock_bh(&ar_sdio->wr_async_lock);
}

static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
                                      struct sk_buff *skb,
                                      struct completion *comp,
                                      bool htc_msg, enum ath10k_htc_ep_id eid)
{
        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
        struct ath10k_sdio_bus_request *bus_req;

        /* Allocate a bus request for the message and queue it on the
         * SDIO workqueue.
         */
        bus_req = ath10k_sdio_alloc_busreq(ar);
        if (!bus_req) {
                ath10k_warn(ar,
                            "unable to allocate bus request for async request\n");
                return -ENOMEM;
        }

        bus_req->skb = skb;
        bus_req->eid = eid;
        bus_req->address = addr;
        bus_req->htc_msg = htc_msg;
        bus_req->comp = comp;

        spin_lock_bh(&ar_sdio->wr_async_lock);
        list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
        spin_unlock_bh(&ar_sdio->wr_async_lock);

        return 0;
}

/* IRQ handler */

static void ath10k_sdio_irq_handler(struct sdio_func *func)
{
        struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
        struct ath10k *ar = ar_sdio->ar;
        unsigned long timeout;
        bool done = false;
        int ret;

        /* Release the host during interrupts so we can pick it back up when
         * we process commands.
         */
        sdio_release_host(ar_sdio->func);

        timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
        do {
                ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
                if (ret)
                        break;
        } while (time_before(jiffies, timeout) && !done);

        ath10k_mac_tx_push_pending(ar);

        sdio_claim_host(ar_sdio->func);

        if (ret && ret != -ECANCELED)
                ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
                            ret);
}

/* sdio HIF functions */

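/* Mask all device interrupt sources by writing back a zeroed copy of the
 * interrupt enable registers in one transfer.
 */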
1444static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
1445{
1446        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1447        struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1448        struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1449        int ret;
1450
1451        mutex_lock(&irq_data->mtx);
1452
1453        memset(regs, 0, sizeof(*regs));
1454        ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1455                                &regs->int_status_en, sizeof(*regs));
1456        if (ret)
1457                ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
1458
1459        mutex_unlock(&irq_data->mtx);
1460
1461        return ret;
1462}
1463
1464static int ath10k_sdio_hif_power_up(struct ath10k *ar,
1465                                    enum ath10k_firmware_mode fw_mode)
1466{
1467        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1468        struct sdio_func *func = ar_sdio->func;
1469        int ret;
1470
1471        if (!ar_sdio->is_disabled)
1472                return 0;
1473
1474        ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
1475
1476        ret = ath10k_sdio_config(ar);
1477        if (ret) {
1478                ath10k_err(ar, "failed to config sdio: %d\n", ret);
1479                return ret;
1480        }
1481
1482        sdio_claim_host(func);
1483
1484        ret = sdio_enable_func(func);
1485        if (ret) {
1486                ath10k_warn(ar, "unable to enable sdio function: %d\n", ret);
1487                sdio_release_host(func);
1488                return ret;
1489        }
1490
1491        sdio_release_host(func);
1492
1493        /* Wait for hardware to initialise. It should take a lot less than
1494         * 20 ms but let's be conservative here.
1495         */
1496        msleep(20);
1497
1498        ar_sdio->is_disabled = false;
1499
1500        ret = ath10k_sdio_hif_disable_intrs(ar);
1501        if (ret)
1502                return ret;
1503
1504        return 0;
1505}
1506
1507static void ath10k_sdio_hif_power_down(struct ath10k *ar)
1508{
1509        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1510        int ret;
1511
1512        if (ar_sdio->is_disabled)
1513                return;
1514
1515        ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
1516
1517        /* Disable the card */
1518        sdio_claim_host(ar_sdio->func);
1519
1520        ret = sdio_disable_func(ar_sdio->func);
1521        if (ret) {
1522                ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
1523                sdio_release_host(ar_sdio->func);
1524                return;
1525        }
1526
1527        ret = mmc_hw_reset(ar_sdio->func->card->host);
1528        if (ret)
1529                ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
1530
1531        sdio_release_host(ar_sdio->func);
1532
1533        ar_sdio->is_disabled = true;
1534}
1535
1536static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1537                                 struct ath10k_hif_sg_item *items, int n_items)
1538{
1539        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1540        enum ath10k_htc_ep_id eid;
1541        struct sk_buff *skb;
1542        int ret, i;
1543
1544        eid = pipe_id_to_eid(pipe_id);
1545
1546        for (i = 0; i < n_items; i++) {
1547                size_t padded_len;
1548                u32 address;
1549
1550                skb = items[i].transfer_context;
1551                padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
1552                                                              skb->len);
1553                skb_trim(skb, padded_len);
1554
1555                /* Write TX data to the end of the mbox address space */
1556                address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
1557                          skb->len;
1558                ret = ath10k_sdio_prep_async_req(ar, address, skb,
1559                                                 NULL, true, eid);
1560                if (ret)
1561                        return ret;
1562        }
1563
1564        queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1565
1566        return 0;
1567}
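
    /* Each packet is placed so that its last byte lands on the mailbox's
     * highest address: as with earlier Atheros mailbox hardware, writing
     * the final mailbox address is what flags end-of-message to the
     * target. Hence the destination is mbox_addr + mbox_size - skb->len
     * rather than the mailbox base address.
     */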
1568
1569static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
1570{
1571        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1572        struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1573        struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1574        int ret;
1575
1576        mutex_lock(&irq_data->mtx);
1577
1578        /* Enable all but CPU interrupts */
1579        regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
1580                              FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
1581                              FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
1582
1583        /* NOTE: In some cases HIF can also detect pending mbox
1584         * messages; that detection is currently disabled.
1585         */
1586        regs->int_status_en |=
1587                FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
1588
1589        /* Set up the CPU Interrupt Status Register and enable CPU
1590         * sourced interrupt #0, which the target uses to report assertions.
1591         */
1592        regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
1593
1594        /* Set up the Error Interrupt status Register */
1595        regs->err_int_status_en =
1596                FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
1597                FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
1598
1599        /* Enable Counter interrupt status register to get fatal errors for
1600         * debugging.
1601         */
1602        regs->cntr_int_status_en =
1603                FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
1604                           ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
1605
1606        ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1607                                &regs->int_status_en, sizeof(*regs));
1608        if (ret)
1609                ath10k_warn(ar,
1610                            "failed to update mbox interrupt status register : %d\n",
1611                            ret);
1612
1613        mutex_unlock(&irq_data->mtx);
1614        return ret;
1615}
1616
1617static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
1618{
1619        u32 val;
1620        int ret;
1621
1622        ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
1623        if (ret) {
1624                ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
1625                            ret);
1626                return ret;
1627        }
1628
1629        if (enable_sleep)
1630                val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
1631        else
1632                val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
1633
1634        ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
1635        if (ret) {
1636                ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
1637                            ret);
1638                return ret;
1639        }
1640
1641        return 0;
1642}
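
    /* Judging by the macro names, this toggles a "disable sleep" control
     * bit: enable_sleep ANDs with the ..._DISABLE_SLEEP_OFF mask to clear
     * the bit (letting the target doze), while !enable_sleep ORs in the
     * ..._DISABLE_SLEEP_ON bit to keep the target awake for mbox traffic.
     */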
1643
1644/* HIF diagnostics */
1645
1646static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1647                                     size_t buf_len)
1648{
1649        int ret;
1650
1651        /* set window register to start read cycle */
1652        ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
1653        if (ret) {
1654                ath10k_warn(ar, "failed to set mbox window read address: %d\n", ret);
1655                return ret;
1656        }
1657
1658        /* read the data */
1659        ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
1660        if (ret) {
1661                ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
1662                            ret);
1663                return ret;
1664        }
1665
1666        return 0;
1667}
1668
1669static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
1670                                       u32 *value)
1671{
1672        __le32 *val;
1673        int ret;
1674
1675        val = kzalloc(sizeof(*val), GFP_KERNEL);
1676        if (!val)
1677                return -ENOMEM;
1678
1679        ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
1680        if (ret)
1681                goto out;
1682
1683        *value = __le32_to_cpu(*val);
1684
1685out:
1686        kfree(val);
1687
1688        return ret;
1689}
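
    /* The single __le32 is allocated with kzalloc() rather than kept on
     * the stack, presumably because the buffer is handed down to the SDIO
     * layer and may be DMA-mapped; DMA to or from stack memory is not
     * permitted.
     */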
1690
1691static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
1692                                          const void *data, int nbytes)
1693{
1694        int ret;
1695
1696        /* set write data */
1697        ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
1698        if (ret) {
1699                ath10k_warn(ar,
1700                            "failed to write 0x%p to mbox window data address: %d\n",
1701                            data, ret);
1702                return ret;
1703        }
1704
1705        /* set window register, which starts the write cycle */
1706        ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
1707        if (ret) {
1708                ath10k_warn(ar, "failed to set mbox window write address: %d\n", ret);
1709                return ret;
1710        }
1711
1712        return 0;
1713}
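
    /* The window sequencing differs between the two directions: for reads
     * the address write starts the fetch into the data window, so it comes
     * first; for writes the data must already sit in the window, so the
     * address write comes last and commits the cycle.
     */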
1714
1715static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar)
1716{
1717        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1718        u32 addr, val;
1719        int ret = 0;
1720
1721        addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
1722
1723        ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
1724        if (ret) {
1725                ath10k_warn(ar, "unable to read hi_acs_flags: %d\n", ret);
1726                return ret;
1727        }
1728
1729        if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
1730                ath10k_dbg(ar, ATH10K_DBG_SDIO,
1731                           "sdio mailbox swap service enabled\n");
1732                ar_sdio->swap_mbox = true;
1733        } else {
1734                ath10k_dbg(ar, ATH10K_DBG_SDIO,
1735                           "sdio mailbox swap service disabled\n");
1736                ar_sdio->swap_mbox = false;
1737        }
1738
1739        return 0;
1740}
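
    /* Note that this op only records the firmware's capability flag; the
     * actual assignment of WMI and HTT to the two extended mailboxes
     * happens later, in ath10k_sdio_hif_map_service_to_pipe(), based on
     * ar_sdio->swap_mbox.
     */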
1741
1742/* HIF start/stop */
1743
1744static int ath10k_sdio_hif_start(struct ath10k *ar)
1745{
1746        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1747        int ret;
1748
1749        napi_enable(&ar->napi);
1750
1751        /* Sleep 20 ms before HIF interrupts are disabled.
1752         * This gives the target plenty of time to process the BMI done
1753         * request before interrupts are disabled.
1754         */
1755        msleep(20);
1756        ret = ath10k_sdio_hif_disable_intrs(ar);
1757        if (ret)
1758                return ret;
1759
1760        /* eid 0 always uses the lower part of the extended mailbox address
1761         * space (ext_info[0].htc_ext_addr).
1762         */
1763        ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1764        ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1765
1766        sdio_claim_host(ar_sdio->func);
1767
1768        /* Register the ISR */
1769        ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
1770        if (ret) {
1771                ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
1772                sdio_release_host(ar_sdio->func);
1773                return ret;
1774        }
1775
1776        sdio_release_host(ar_sdio->func);
1777
1778        ret = ath10k_sdio_hif_enable_intrs(ar);
1779        if (ret)
1780                ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
1781
1782        /* Enable sleep and then disable it again */
1783        ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
1784        if (ret)
1785                return ret;
1786
1787        /* Wait for 20ms for the written value to take effect */
1788        msleep(20);
1789
1790        ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
1791        if (ret)
1792                return ret;
1793
1794        return 0;
1795}
1796
1797#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
1798
1799static void ath10k_sdio_irq_disable(struct ath10k *ar)
1800{
1801        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1802        struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
1803        struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
1804        struct sk_buff *skb;
1805        struct completion irqs_disabled_comp;
1806        int ret;
1807
1808        skb = dev_alloc_skb(sizeof(*regs));
1809        if (!skb)
1810                return;
1811
1812        mutex_lock(&irq_data->mtx);
1813
1814        memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
1815        memcpy(skb->data, regs, sizeof(*regs));
1816        skb_put(skb, sizeof(*regs));
1817
1818        mutex_unlock(&irq_data->mtx);
1819
1820        init_completion(&irqs_disabled_comp);
1821        ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
1822                                         skb, &irqs_disabled_comp, false, 0);
1823        if (ret)
1824                goto out;
1825
1826        queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
1827
1828        /* Wait for the completion of the IRQ disable request.
1829         * If it times out, the SDIO IRQ is released anyway.
1830         */
1831        ret = wait_for_completion_timeout(&irqs_disabled_comp,
1832                                          SDIO_IRQ_DISABLE_TIMEOUT_HZ);
1833        if (!ret)
1834                ath10k_warn(ar, "sdio irq disable request timed out\n");
1835
1836        sdio_claim_host(ar_sdio->func);
1837
1838        ret = sdio_release_irq(ar_sdio->func);
1839        if (ret)
1840                ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
1841
1842        sdio_release_host(ar_sdio->func);
1843
1844out:
1845        kfree_skb(skb);
1846}
1847
1848static void ath10k_sdio_hif_stop(struct ath10k *ar)
1849{
1850        struct ath10k_sdio_bus_request *req, *tmp_req;
1851        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1852
1853        ath10k_sdio_irq_disable(ar);
1854
1855        cancel_work_sync(&ar_sdio->wr_async_work);
1856
1857        spin_lock_bh(&ar_sdio->wr_async_lock);
1858
1859        /* Free all bus requests that have not been handled */
1860        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
1861                struct ath10k_htc_ep *ep;
1862
1863                list_del(&req->list);
1864
1865                if (req->htc_msg) {
1866                        ep = &ar->htc.endpoint[req->eid];
1867                        ath10k_htc_notify_tx_completion(ep, req->skb);
1868                } else if (req->skb) {
1869                        kfree_skb(req->skb);
1870                }
1871                ath10k_sdio_free_bus_req(ar, req);
1872        }
1873
1874        spin_unlock_bh(&ar_sdio->wr_async_lock);
1875
1876        napi_synchronize(&ar->napi);
1877        napi_disable(&ar->napi);
1878}
1879
1880#ifdef CONFIG_PM
1881
1882static int ath10k_sdio_hif_suspend(struct ath10k *ar)
1883{
1884        return 0;
1885}
1886
1887static int ath10k_sdio_hif_resume(struct ath10k *ar)
1888{
1889        switch (ar->state) {
1890        case ATH10K_STATE_OFF:
1891                ath10k_dbg(ar, ATH10K_DBG_SDIO,
1892                           "sdio resume configuring sdio\n");
1893
1894                /* sdio settings need to be reapplied after power was cut */
1895                ath10k_sdio_config(ar);
1896                break;
1897
1898        case ATH10K_STATE_ON:
1899        default:
1900                break;
1901        }
1902
1903        return 0;
1904}
1905#endif
1906
1907static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
1908                                               u16 service_id,
1909                                               u8 *ul_pipe, u8 *dl_pipe)
1910{
1911        struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
1912        struct ath10k_htc *htc = &ar->htc;
1913        u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
1914        enum ath10k_htc_ep_id eid;
1915        bool ep_found = false;
1916        int i;
1917
1918        /* For sdio, what matters is the mapping between eid and
1919         * pipe_id rather than from service_id to pipe_id.
1920         * First find out which eid has been allocated to the
1921         * service...
1922         */
1923        for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
1924                if (htc->endpoint[i].service_id == service_id) {
1925                        eid = htc->endpoint[i].eid;
1926                        ep_found = true;
1927                        break;
1928                }
1929        }
1930
1931        if (!ep_found)
1932                return -EINVAL;
1933
1934        /* Then we create the simplest mapping possible between pipeid
1935         * and eid
1936         */
1937        *ul_pipe = *dl_pipe = (u8)eid;
1938
1939        /* Normally, HTT will use the upper part of the extended
1940         * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
1941         * the lower part (ext_info[0].htc_ext_addr).
1942         * If fw wants swapping of mailbox addresses, the opposite is true.
1943         */
1944        if (ar_sdio->swap_mbox) {
1945                htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1946                wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
1947                htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1948                wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
1949        } else {
1950                htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
1951                wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
1952                htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
1953                wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
1954        }
1955
1956        switch (service_id) {
1957        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
1958                /* HTC ctrl ep mbox address has already been setup in
1959                 * ath10k_sdio_hif_start
1960                 */
1961                break;
1962        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1963                ar_sdio->mbox_addr[eid] = wmi_addr;
1964                ar_sdio->mbox_size[eid] = wmi_mbox_size;
1965                ath10k_dbg(ar, ATH10K_DBG_SDIO,
1966                           "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
1967                           ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
1968                break;
1969        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
1970                ar_sdio->mbox_addr[eid] = htt_addr;
1971                ar_sdio->mbox_size[eid] = htt_mbox_size;
1972                ath10k_dbg(ar, ATH10K_DBG_SDIO,
1973                           "sdio htt data mbox_addr 0x%x mbox_size %d\n",
1974                           ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
1975                break;
1976        default:
1977                ath10k_warn(ar, "unsupported HTC service id: %d\n",
1978                            service_id);
1979                return -EINVAL;
1980        }
1981
1982        return 0;
1983}
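
    /* Resulting layout in the default (non swapped) case:
     *
     *	eid 0 / HTC ctrl  -> ext_info[0] (set up in ath10k_sdio_hif_start())
     *	WMI control       -> ext_info[0] address and size
     *	HTT data          -> ext_info[1] address and size
     *
     * With swap_mbox set, WMI and HTT trade places while the HTC ctrl
     * endpoint stays on ext_info[0].
     */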
1984
1985static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
1986                                             u8 *ul_pipe, u8 *dl_pipe)
1987{
1988        ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");
1989
1990        /* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
1991         * case) == 0
1992         */
1993        *ul_pipe = 0;
1994        *dl_pipe = 0;
1995}
1996
1997/* This op is currently only used by htc_wait_target if the HTC ready
1998 * message times out. It is not applicable for SDIO since there is nothing
1999 * we can do if the HTC ready message does not arrive in time.
2000 * TODO: Make this op non-mandatory by introducing a NULL check in the
2001 * hif op wrapper.
2002 */
2003static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
2004                                                u8 pipe, int force)
2005{
2006}
2007
2008static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
2009        .tx_sg                  = ath10k_sdio_hif_tx_sg,
2010        .diag_read              = ath10k_sdio_hif_diag_read,
2011        .diag_write             = ath10k_sdio_hif_diag_write_mem,
2012        .exchange_bmi_msg       = ath10k_sdio_bmi_exchange_msg,
2013        .start                  = ath10k_sdio_hif_start,
2014        .stop                   = ath10k_sdio_hif_stop,
2015        .swap_mailbox           = ath10k_sdio_hif_swap_mailbox,
2016        .map_service_to_pipe    = ath10k_sdio_hif_map_service_to_pipe,
2017        .get_default_pipe       = ath10k_sdio_hif_get_default_pipe,
2018        .send_complete_check    = ath10k_sdio_hif_send_complete_check,
2019        .power_up               = ath10k_sdio_hif_power_up,
2020        .power_down             = ath10k_sdio_hif_power_down,
2021#ifdef CONFIG_PM
2022        .suspend                = ath10k_sdio_hif_suspend,
2023        .resume                 = ath10k_sdio_hif_resume,
2024#endif
2025};
2026
2027#ifdef CONFIG_PM_SLEEP
2028
2029/* Empty handlers so that mmc subsystem doesn't remove us entirely during
2030 * suspend. We instead follow cfg80211 suspend/resume handlers.
2031 */
2032static int ath10k_sdio_pm_suspend(struct device *device)
2033{
2034        struct sdio_func *func = dev_to_sdio_func(device);
2035        struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2036        struct ath10k *ar = ar_sdio->ar;
2037        mmc_pm_flag_t pm_flag, pm_caps;
2038        int ret;
2039
2040        if (!device_may_wakeup(ar->dev))
2041                return 0;
2042
2043        pm_flag = MMC_PM_KEEP_POWER;
2044
2045        ret = sdio_set_host_pm_flags(func, pm_flag);
2046        if (ret) {
2047                pm_caps = sdio_get_host_pm_caps(func);
2048                ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
2049                            pm_flag, pm_caps, ret);
2050                return ret;
2051        }
2052
2053        return ret;
2054}
2055
2056static int ath10k_sdio_pm_resume(struct device *device)
2057{
2058        return 0;
2059}
2060
2061static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
2062                         ath10k_sdio_pm_resume);
2063
2064#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
2065
2066#else
2067
2068#define ATH10K_SDIO_PM_OPS NULL
2069
2070#endif /* CONFIG_PM_SLEEP */
2071
2072static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
2073{
2074        struct ath10k *ar = container_of(ctx, struct ath10k, napi);
2075        int done;
2076
2077        done = ath10k_htt_rx_hl_indication(ar, budget);
2078        ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget: %d\n", done, budget);
2079
2080        if (done < budget)
2081                napi_complete_done(ctx, done);
2082
2083        return done;
2084}
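
    /* This follows the usual NAPI contract: returning less than the budget
     * and calling napi_complete_done() finishes the polling round, whereas
     * consuming the full budget leaves the NAPI instance scheduled for
     * another pass.
     */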
2085
2086static int ath10k_sdio_probe(struct sdio_func *func,
2087                             const struct sdio_device_id *id)
2088{
2089        struct ath10k_sdio *ar_sdio;
2090        struct ath10k *ar;
2091        enum ath10k_hw_rev hw_rev;
2092        u32 dev_id_base;
2093        struct ath10k_bus_params bus_params = {};
2094        int ret, i;
2095
2096        /* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
2097         * If a newer chipset appears that does not use the hw reg setup
2098         * defined in qca6174_regs and qca6174_values, this assumption no
2099         * longer holds and hw_rev must be set up differently depending
2100         * on the chipset.
2101         */
2102        hw_rev = ATH10K_HW_QCA6174;
2103
2104        ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
2105                                hw_rev, &ath10k_sdio_hif_ops);
2106        if (!ar) {
2107                dev_err(&func->dev, "failed to allocate core\n");
2108                return -ENOMEM;
2109        }
2110
2111        netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
2112                       ATH10K_NAPI_BUDGET);
2113
2114        ath10k_dbg(ar, ATH10K_DBG_BOOT,
2115                   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
2116                   func->num, func->vendor, func->device,
2117                   func->max_blksize, func->cur_blksize);
2118
2119        ar_sdio = ath10k_sdio_priv(ar);
2120
2121        ar_sdio->irq_data.irq_proc_reg =
2122                devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
2123                             GFP_KERNEL);
2124        if (!ar_sdio->irq_data.irq_proc_reg) {
2125                ret = -ENOMEM;
2126                goto err_core_destroy;
2127        }
2128
2129        ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
2130        if (!ar_sdio->vsg_buffer) {
2131                ret = -ENOMEM;
2132                goto err_core_destroy;
2133        }
2134
2135        ar_sdio->irq_data.irq_en_reg =
2136                devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
2137                             GFP_KERNEL);
2138        if (!ar_sdio->irq_data.irq_en_reg) {
2139                ret = -ENOMEM;
2140                goto err_core_destroy;
2141        }
2142
2143        ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
2144        if (!ar_sdio->bmi_buf) {
2145                ret = -ENOMEM;
2146                goto err_core_destroy;
2147        }
2148
2149        ar_sdio->func = func;
2150        sdio_set_drvdata(func, ar_sdio);
2151
2152        ar_sdio->is_disabled = true;
2153        ar_sdio->ar = ar;
2154
2155        spin_lock_init(&ar_sdio->lock);
2156        spin_lock_init(&ar_sdio->wr_async_lock);
2157        mutex_init(&ar_sdio->irq_data.mtx);
2158
2159        INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
2160        INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
2161
2162        INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
2163        ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
2164        if (!ar_sdio->workqueue) {
2165                ret = -ENOMEM;
2166                goto err_core_destroy;
2167        }
2168
2169        for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
2170                ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
2171
2172        skb_queue_head_init(&ar_sdio->rx_head);
2173        INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
2174
2175        dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
2176        switch (dev_id_base) {
2177        case QCA_MANUFACTURER_ID_AR6005_BASE:
2178        case QCA_MANUFACTURER_ID_QCA9377_BASE:
2179                ar->dev_id = QCA9377_1_0_DEVICE_ID;
2180                break;
2181        default:
2182                ret = -ENODEV;
2183                ath10k_err(ar, "unsupported device id %u (0x%x)\n",
2184                           dev_id_base, id->device);
2185                goto err_free_wq;
2186        }
2187
2188        ar->id.vendor = id->vendor;
2189        ar->id.device = id->device;
2190
2191        ath10k_sdio_set_mbox_info(ar);
2192
2193        bus_params.dev_type = ATH10K_DEV_TYPE_HL;
2194        /* TODO: don't know yet how to get chip_id with SDIO */
2195        bus_params.chip_id = 0;
2196        bus_params.hl_msdu_ids = true;
2197
2198        ar->hw->max_mtu = ETH_DATA_LEN;
2199
2200        ret = ath10k_core_register(ar, &bus_params);
2201        if (ret) {
2202                ath10k_err(ar, "failed to register driver core: %d\n", ret);
2203                goto err_free_wq;
2204        }
2205
2206        return 0;
2207
2208err_free_wq:
2209        destroy_workqueue(ar_sdio->workqueue);
2210err_core_destroy:
2211        ath10k_core_destroy(ar);
2212
2213        return ret;
2214}
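
    /* The interrupt register shadows, vsg_buffer and bmi_buf above are
     * devm allocations tied to ar->dev (&func->dev), which is why the
     * error paths only unwind the workqueue and the core; devres releases
     * the rest when the device is detached.
     */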
2215
2216static void ath10k_sdio_remove(struct sdio_func *func)
2217{
2218        struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
2219        struct ath10k *ar = ar_sdio->ar;
2220
2221        ath10k_dbg(ar, ATH10K_DBG_BOOT,
2222                   "sdio removed func %d vendor 0x%x device 0x%x\n",
2223                   func->num, func->vendor, func->device);
2224
2225        ath10k_core_unregister(ar);
2226
2227        netif_napi_del(&ar->napi);
2228
2229        /* destroy the workqueue (which also drains it) before
2230         * ath10k_core_destroy() frees ar, and with it ar_sdio
2231         */
2232        destroy_workqueue(ar_sdio->workqueue);
2233
2234        ath10k_core_destroy(ar);
2235}
2234
2235static const struct sdio_device_id ath10k_sdio_devices[] = {
2236        {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
2237                     (QCA_SDIO_ID_AR6005_BASE | 0xA))},
2238        {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
2239                     (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
2240        {},
2241};
2242
2243MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
2244
2245static struct sdio_driver ath10k_sdio_driver = {
2246        .name = "ath10k_sdio",
2247        .id_table = ath10k_sdio_devices,
2248        .probe = ath10k_sdio_probe,
2249        .remove = ath10k_sdio_remove,
2250        .drv = {
2251                .owner = THIS_MODULE,
2252                .pm = ATH10K_SDIO_PM_OPS,
2253        },
2254};
2255
2256static int __init ath10k_sdio_init(void)
2257{
2258        int ret;
2259
2260        ret = sdio_register_driver(&ath10k_sdio_driver);
2261        if (ret)
2262                pr_err("sdio driver registration failed: %d\n", ret);
2263
2264        return ret;
2265}
2266
2267static void __exit ath10k_sdio_exit(void)
2268{
2269        sdio_unregister_driver(&ath10k_sdio_driver);
2270}
2271
2272module_init(ath10k_sdio_init);
2273module_exit(ath10k_sdio_exit);
2274
2275MODULE_AUTHOR("Qualcomm Atheros");
2276MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
2277MODULE_LICENSE("Dual BSD/GPL");
2278