linux/drivers/net/wireless/ath/wcn36xx/dxe.c
/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority ones.
 */
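
/* A rough sketch of how the rings are laid out (illustrative only; the
 * authoritative control words, masks and register offsets live in dxe.h):
 *
 *     TX ring:  [BD desc] -> [skb desc] -> [BD desc] -> [skb desc] -> ...
 *     RX ring:  [skb desc] -> [skb desc] -> [skb desc] -> ...
 *
 * Each ring is a circular list of wcn36xx_dxe_ctl blocks backed by a
 * coherent array of hardware descriptors. On TX the descriptors are used
 * in pairs: an even one carries the buffer descriptor (BD) header, the
 * following odd one carries the frame payload.
 */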

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"

static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
                    addr, data);

        writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
                    addr, data);

        writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
        *data = readl(wcn->dxe_base + addr);

        wcn36xx_dbg(WCN36XX_DBG_DXE,
                    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
                    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
        int i;

        for (i = 0; i < ch->desc_num && ctl; i++) {
                next = ctl->next;
                kfree(ctl);
                ctl = next;
        }
}

static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *prev_ctl = NULL;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;
        int i;

        spin_lock_init(&ch->lock);
        for (i = 0; i < ch->desc_num; i++) {
                cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
                if (!cur_ctl)
                        goto out_fail;

                cur_ctl->ctl_blk_order = i;
                if (i == 0) {
                        ch->head_blk_ctl = cur_ctl;
                        ch->tail_blk_ctl = cur_ctl;
                } else if (i == ch->desc_num - 1) {
                        prev_ctl->next = cur_ctl;
                        cur_ctl->next = ch->head_blk_ctl;
                } else {
                        prev_ctl->next = cur_ctl;
                }
                prev_ctl = cur_ctl;
        }

        return 0;

out_fail:
        wcn36xx_dxe_free_ctl_block(ch);
        return -ENOMEM;
}
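
/* On success the ctl blocks form a circular, singly linked list, e.g. for
 * desc_num == 3 (purely illustrative):
 *
 *     head_blk_ctl -> ctl[0] -> ctl[1] -> ctl[2]
 *                       ^                    |
 *                       +--------------------+
 *
 * tail_blk_ctl starts out equal to head_blk_ctl and, on TX channels, is
 * advanced by reap_tx_dxes() as the hardware consumes descriptors.
 */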

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
        int ret;

        wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
        wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
        wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
        wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

        wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
        wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

        wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
        wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

        wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
        wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

        wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
        wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

        wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
        wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

        wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
        wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

        /* DXE control block allocation */
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
        if (ret)
                goto out_err;
        ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
        if (ret)
                goto out_err;

        /* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
        ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
                                          WCN36XX_SMSM_WLAN_TX_ENABLE |
                                          WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
                                          WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
        if (ret)
                goto out_err;

        return 0;

out_err:
        wcn36xx_err("Failed to allocate DXE control blocks\n");
        wcn36xx_dxe_free_ctl_blks(wcn);
        return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
        wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
        struct wcn36xx_dxe_desc *cur_dxe = NULL;
        struct wcn36xx_dxe_desc *prev_dxe = NULL;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;
        size_t size;
        int i;

        size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
        wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
                                              GFP_KERNEL);
        if (!wcn_ch->cpu_addr)
                return -ENOMEM;

        cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
        cur_ctl = wcn_ch->head_blk_ctl;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                cur_ctl->desc = cur_dxe;
                cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
                        i * sizeof(struct wcn36xx_dxe_desc);

                switch (wcn_ch->ch_type) {
                case WCN36XX_DXE_CH_TX_L:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
                        cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
                        break;
                case WCN36XX_DXE_CH_TX_H:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
                        cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
                        break;
                case WCN36XX_DXE_CH_RX_L:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
                        cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
                        break;
                case WCN36XX_DXE_CH_RX_H:
                        cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
                        cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
                        break;
                }
                if (i == 0) {
                        cur_dxe->phy_next_l = 0;
                } else if (i < wcn_ch->desc_num - 1) {
                        prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
                } else {
                        prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
                        cur_dxe->phy_next_l =
                                wcn_ch->head_blk_ctl->desc_phy_addr;
                }
                cur_ctl = cur_ctl->next;
                prev_dxe = cur_dxe;
                cur_dxe++;
        }

        return 0;
}
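
/* Worked example of the resulting hardware chaining for desc_num == 4,
 * taking sizeof(struct wcn36xx_dxe_desc) as 32 bytes purely for
 * illustration:
 *
 *     desc[0].phy_next_l = dma_addr + 32   (written on the i == 1 pass)
 *     desc[1].phy_next_l = dma_addr + 64   (written on the i == 2 pass)
 *     desc[2].phy_next_l = dma_addr + 96   (written on the i == 3 pass)
 *     desc[3].phy_next_l = dma_addr        (tail points back to the head)
 */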

static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
        size_t size;

        size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
        dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}

static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
                                   struct wcn36xx_dxe_mem_pool *pool)
{
        int i, chunk_size = pool->chunk_size;
        dma_addr_t bd_phy_addr = pool->phy_addr;
        void *bd_cpu_addr = pool->virt_addr;
        struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

        for (i = 0; i < ch->desc_num; i++) {
                /* Only every second dxe needs a bd pointer;
                 * the others point directly at the skb data.
                 */
                if (!(i & 1)) {
                        cur->bd_phy_addr = bd_phy_addr;
                        cur->bd_cpu_addr = bd_cpu_addr;
                        bd_phy_addr += chunk_size;
                        bd_cpu_addr += chunk_size;
                } else {
                        cur->bd_phy_addr = 0;
                        cur->bd_cpu_addr = NULL;
                }
                cur = cur->next;
        }
}
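
/* For a TX channel with desc_num == 4 this yields (illustrative):
 *
 *     ctl[0]: bd_cpu_addr = pool + 0 * chunk_size   (BD descriptor)
 *     ctl[1]: bd_cpu_addr = NULL                    (skb descriptor)
 *     ctl[2]: bd_cpu_addr = pool + 1 * chunk_size   (BD descriptor)
 *     ctl[3]: bd_cpu_addr = NULL                    (skb descriptor)
 *
 * which is the BD/skb pairing that wcn36xx_dxe_tx_frame() relies on below.
 */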

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
        int reg_data = 0;

        wcn36xx_dxe_read_register(wcn,
                                  WCN36XX_DXE_INT_MASK_REG,
                                  &reg_data);

        reg_data |= wcn_ch;

        wcn36xx_dxe_write_register(wcn,
                                   WCN36XX_DXE_INT_MASK_REG,
                                   (int)reg_data);
        return 0;
}

static int wcn36xx_dxe_fill_skb(struct device *dev,
                                struct wcn36xx_dxe_ctl *ctl,
                                gfp_t gfp)
{
        struct wcn36xx_dxe_desc *dxe = ctl->desc;
        struct sk_buff *skb;

        skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
        if (!skb)
                return -ENOMEM;

        dxe->dst_addr_l = dma_map_single(dev,
                                         skb_tail_pointer(skb),
                                         WCN36XX_PKT_SIZE,
                                         DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dxe->dst_addr_l)) {
                dev_err(dev, "unable to map skb\n");
                kfree_skb(skb);
                return -ENOMEM;
        }
        ctl->skb = skb;

        return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
                                    struct wcn36xx_dxe_ch *wcn_ch)
{
        int i;
        struct wcn36xx_dxe_ctl *cur_ctl = NULL;

        cur_ctl = wcn_ch->head_blk_ctl;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
                cur_ctl = cur_ctl->next;
        }

        return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *wcn_ch)
{
        struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
        int i;

        for (i = 0; i < wcn_ch->desc_num; i++) {
                kfree_skb(cur->skb);
                cur = cur->next;
        }
}

void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&wcn->dxe_lock, flags);
        skb = wcn->tx_ack_skb;
        wcn->tx_ack_skb = NULL;
        del_timer(&wcn->tx_ack_timer);
        spin_unlock_irqrestore(&wcn->dxe_lock, flags);

        if (!skb) {
                wcn36xx_warn("Spurious TX complete indication\n");
                return;
        }

        info = IEEE80211_SKB_CB(skb);

        if (status == 1)
                info->flags |= IEEE80211_TX_STAT_ACK;
        else
                info->flags &= ~IEEE80211_TX_STAT_ACK;

        wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

        ieee80211_tx_status_irqsafe(wcn->hw, skb);
        ieee80211_wake_queues(wcn->hw);
}

static void wcn36xx_dxe_tx_timer(struct timer_list *t)
{
        struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
        struct ieee80211_tx_info *info;
        unsigned long flags;
        struct sk_buff *skb;

        /* TX Timeout */
        wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");

        spin_lock_irqsave(&wcn->dxe_lock, flags);
        skb = wcn->tx_ack_skb;
        wcn->tx_ack_skb = NULL;
        spin_unlock_irqrestore(&wcn->dxe_lock, flags);

        if (!skb)
                return;

        info = IEEE80211_SKB_CB(skb);
        info->flags &= ~IEEE80211_TX_STAT_ACK;
        info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;

        ieee80211_tx_status_irqsafe(wcn->hw, skb);
        ieee80211_wake_queues(wcn->hw);
}

static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
        struct wcn36xx_dxe_ctl *ctl;
        struct ieee80211_tx_info *info;
        unsigned long flags;

        /*
         * Make at least one pass of the do-while: when the ring is
         * completely full, head and tail point at the same element, so a
         * while-do loop would not run at all.
         */
        spin_lock_irqsave(&ch->lock, flags);
        ctl = ch->tail_blk_ctl;
        do {
                if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
                        break;

                if (ctl->skb &&
                    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
                        dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
                                         ctl->skb->len, DMA_TO_DEVICE);
                        info = IEEE80211_SKB_CB(ctl->skb);
                        if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
                                /* No TX status was requested for this frame,
                                 * so release it now; frames with
                                 * IEEE80211_TX_CTL_REQ_TX_STATUS are kept
                                 * until the TX status indication arrives.
                                 */
                                ieee80211_free_txskb(wcn->hw, ctl->skb);
                        }

                        if (wcn->queues_stopped) {
                                wcn->queues_stopped = false;
                                ieee80211_wake_queues(wcn->hw);
                        }

                        ctl->skb = NULL;
                }
                ctl = ctl->next;
        } while (ctl != ch->head_blk_ctl);

        ch->tail_blk_ctl = ctl;
        spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;
        int int_src, int_reason;
        bool transmitted = false;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
                                          &int_reason);

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_H);

                if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_ERR_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_H);

                        wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
                                        int_src);
                }

                if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_DONE_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_H);
                }

                if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_ED_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_H);
                }

                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
                            int_reason);

                if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
                                  WCN36XX_CH_STAT_INT_ED_MASK)) {
                        reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
                        transmitted = true;
                }
        }

        if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
                wcn36xx_dxe_read_register(wcn,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
                                          &int_reason);

                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_CLR,
                                           WCN36XX_INT_MASK_CHAN_TX_L);

                if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_ERR_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_L);

                        wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
                                        int_src);
                }

                if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_DONE_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_L);
                }

                if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
                        wcn36xx_dxe_write_register(wcn,
                                                   WCN36XX_DXE_0_INT_ED_CLR,
                                                   WCN36XX_INT_MASK_CHAN_TX_L);
                }

                wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
                            int_reason);

                if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
                                  WCN36XX_CH_STAT_INT_ED_MASK)) {
                        reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
                        transmitted = true;
                }
        }

        spin_lock(&wcn->dxe_lock);
        if (wcn->tx_ack_skb && transmitted) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(wcn->tx_ack_skb);

                /* TX complete, no need to wait for 802.11 ack indication */
                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS &&
                    info->flags & IEEE80211_TX_CTL_NO_ACK) {
                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                        del_timer(&wcn->tx_ack_timer);
                        ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
                        wcn->tx_ack_skb = NULL;
                        ieee80211_wake_queues(wcn->hw);
                }
        }
        spin_unlock(&wcn->dxe_lock);

        return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
        struct wcn36xx *wcn = (struct wcn36xx *)dev;

        wcn36xx_dxe_rx_frame(wcn);

        return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
        int ret;

        ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
                          IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc tx irq\n");
                goto out_err;
        }

        ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
                          "wcn36xx_rx", wcn);
        if (ret) {
                wcn36xx_err("failed to alloc rx irq\n");
                goto out_txirq;
        }

        enable_irq_wake(wcn->rx_irq);

        return 0;

out_txirq:
        free_irq(wcn->tx_irq, wcn);
out_err:
        return ret;
}

static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
                                     struct wcn36xx_dxe_ch *ch,
                                     u32 ctrl,
                                     u32 en_mask,
                                     u32 int_mask,
                                     u32 status_reg)
{
        struct wcn36xx_dxe_desc *dxe;
        struct wcn36xx_dxe_ctl *ctl;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        u32 int_reason;
        int ret;

        wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

        if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_ERR_CLR,
                                           int_mask);

                wcn36xx_err("DXE IRQ reported error on RX channel\n");
        }

        if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_DONE_CLR,
                                           int_mask);

        if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
                wcn36xx_dxe_write_register(wcn,
                                           WCN36XX_DXE_0_INT_ED_CLR,
                                           int_mask);

        if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
                            WCN36XX_CH_STAT_INT_ED_MASK)))
                return 0;

        spin_lock(&ch->lock);

        ctl = ch->head_blk_ctl;
        dxe = ctl->desc;

        while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
                skb = ctl->skb;
                dma_addr = dxe->dst_addr_l;
                ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
                if (!ret) {
                        /* New skb allocated and mapped ok: use it for DMA
                         * and hand the old one up to the network stack.
                         */
                        dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
                                        DMA_FROM_DEVICE);
                        wcn36xx_rx_skb(wcn, skb);
                }
                /* else: keep the old, still-mapped skb and reuse it for
                 * RX DMA; the received frame is dropped.
                 */

                dxe->ctrl = ctrl;
                ctl = ctl->next;
                dxe = ctl->desc;
        }
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

        ch->head_blk_ctl = ctl;

        spin_unlock(&ch->lock);

        return 0;
}
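
/* A note on the refill above: this path runs from the RX interrupt
 * handler, hence the GFP_ATOMIC allocation in wcn36xx_dxe_fill_skb().
 * Dropping the frame on allocation failure keeps the descriptor armed
 * with the old, still-mapped skb, so the ring never develops a hole that
 * the hardware could stall on.
 */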

void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
        int int_src;

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

        /* RX_LOW_PRI */
        if (int_src & WCN36XX_DXE_INT_CH1_MASK)
                wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
                                          WCN36XX_DXE_CTRL_RX_L,
                                          WCN36XX_DXE_INT_CH1_MASK,
                                          WCN36XX_INT_MASK_CHAN_RX_L,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);

        /* RX_HIGH_PRI */
        if (int_src & WCN36XX_DXE_INT_CH3_MASK)
                wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
                                          WCN36XX_DXE_CTRL_RX_H,
                                          WCN36XX_DXE_INT_CH3_MASK,
                                          WCN36XX_INT_MASK_CHAN_RX_H,
                                          WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);

        if (!int_src)
                wcn36xx_warn("No DXE interrupt pending\n");
}

int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
        size_t s;
        void *cpu_addr;

        /* Allocate BD headers for MGMT frames */

        /* Where this comes from, ask QC */
        wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
        cpu_addr = dma_alloc_coherent(wcn->dev, s,
                                      &wcn->mgmt_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->mgmt_mem_pool.virt_addr = cpu_addr;

        /* Allocate BD headers for DATA frames */

        /* Where this comes from, ask QC */
        wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
                16 - (WCN36XX_BD_CHUNK_SIZE % 8);

        s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
        cpu_addr = dma_alloc_coherent(wcn->dev, s,
                                      &wcn->data_mem_pool.phy_addr,
                                      GFP_KERNEL);
        if (!cpu_addr)
                goto out_err;

        wcn->data_mem_pool.virt_addr = cpu_addr;

        return 0;

out_err:
        wcn36xx_dxe_free_mem_pools(wcn);
        wcn36xx_err("Failed to allocate BD mempool\n");
        return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
        if (wcn->mgmt_mem_pool.virt_addr)
                dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
                                  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
                                  wcn->mgmt_mem_pool.virt_addr,
                                  wcn->mgmt_mem_pool.phy_addr);

        if (wcn->data_mem_pool.virt_addr)
                dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
                                  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
                                  wcn->data_mem_pool.virt_addr,
                                  wcn->data_mem_pool.phy_addr);
}

int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
                         struct wcn36xx_vif *vif_priv,
                         struct wcn36xx_tx_bd *bd,
                         struct sk_buff *skb,
                         bool is_low)
{
        struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
        struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
        struct wcn36xx_dxe_ch *ch = NULL;
        unsigned long flags;
        int ret;

        ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

        spin_lock_irqsave(&ch->lock, flags);
        ctl_bd = ch->head_blk_ctl;
        ctl_skb = ctl_bd->next;

        /*
         * If the skb is not NULL, we have caught up with the tail of the
         * ring and the ring is full. Stop the queues to let mac80211 back
         * off until the ring has an empty slot again.
         */
        if (ctl_skb->skb) {
                ieee80211_stop_queues(wcn->hw);
                wcn->queues_stopped = true;
                spin_unlock_irqrestore(&ch->lock, flags);
                return -EBUSY;
        }

        if (unlikely(ctl_skb->bd_cpu_addr)) {
                wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
                ret = -EINVAL;
                goto unlock;
        }

        desc_bd = ctl_bd->desc;
        desc_skb = ctl_skb->desc;

        ctl_bd->skb = NULL;

        /* Write the buffer descriptor */
        memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));

        /* Set the source address of the BD we send */
        desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
        desc_bd->dst_addr_l = ch->dxe_wq;
        desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);

        wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
                         (char *)desc_bd, sizeof(*desc_bd));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
                         "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
                         sizeof(struct wcn36xx_tx_bd));

        desc_skb->src_addr_l = dma_map_single(wcn->dev,
                                              skb->data,
                                              skb->len,
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
                dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
                ret = -ENOMEM;
                goto unlock;
        }

        ctl_skb->skb = skb;
        desc_skb->dst_addr_l = ch->dxe_wq;
        desc_skb->fr_len = ctl_skb->skb->len;

        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
                         (char *)desc_skb, sizeof(*desc_skb));
        wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
                         (char *)ctl_skb->skb->data, ctl_skb->skb->len);

        /* Move the head of the ring to the next empty descriptor */
        ch->head_blk_ctl = ctl_skb->next;

        /* Commit all previous writes and set descriptors to VALID */
        wmb();
        desc_skb->ctrl = ch->ctrl_skb;
        wmb();
        desc_bd->ctrl = ch->ctrl_bd;

        /*
         * When connected and trying to send a data frame, the chip can be
         * in sleep mode, and writing to the register will not wake it up.
         * Notify the chip about the new frame through the SMSM bus instead.
         */
        if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
                qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
                                            WCN36XX_SMSM_WLAN_TX_ENABLE,
                                            WCN36XX_SMSM_WLAN_TX_ENABLE);
        } else {
                /* Indicate End Of Packet and generate an interrupt on
                 * descriptor done.
                 */
                wcn36xx_dxe_write_register(wcn,
                        ch->reg_ctrl, ch->def_ctrl);
        }

        ret = 0;
unlock:
        spin_unlock_irqrestore(&ch->lock, flags);
        return ret;
}
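
/* Ordering note, as a sketch: the skb descriptor's valid bit is set before
 * the BD descriptor's, each behind a wmb(), so by the time the hardware
 * sees a valid BD descriptor the whole BD + payload pair is visible:
 *
 *     fill desc_bd / desc_skb fields
 *     wmb();
 *     desc_skb->ctrl = ch->ctrl_skb;  // payload descriptor becomes valid
 *     wmb();
 *     desc_bd->ctrl = ch->ctrl_bd;    // BD descriptor valid; hw may start
 */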

int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
        int reg_data = 0, ret;

        reg_data = WCN36XX_DXE_REG_RESET;
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

        /* Select channels for rx avail and xfer done interrupts... */
        reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
                    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
        if (wcn->is_pronto)
                wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
        else
                wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

        /***************************************/
        /* Init descriptors for TX LOW channel */
        /***************************************/
        ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
        if (ret) {
                dev_err(wcn->dev, "Error allocating descriptor\n");
                return ret;
        }
        wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
                wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

        /* Program DMA destination addr for TX LOW */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_TX_L,
                WCN36XX_DXE_WQ_TX_L);

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

        /****************************************/
        /* Init descriptors for TX HIGH channel */
        /****************************************/
        ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
        if (ret) {
                dev_err(wcn->dev, "Error allocating descriptor\n");
                goto out_err_txh_ch;
        }

        wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
                wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

        /* Program DMA destination addr for TX HIGH */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_TX_H,
                WCN36XX_DXE_WQ_TX_H);

        wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

        /***************************************/
        /* Init descriptors for RX LOW channel */
        /***************************************/
        ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
        if (ret) {
                dev_err(wcn->dev, "Error allocating descriptor\n");
                goto out_err_rxl_ch;
        }

        /* For RX we need to preallocate buffers */
        wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
                wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

        /* Write DMA source address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_SRC_ADDR_RX_L,
                WCN36XX_DXE_WQ_RX_L);

        /* Program preallocated destination address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_RX_L,
                wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

        /* Enable default control registers */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_REG_CTL_RX_L,
                WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

        /****************************************/
        /* Init descriptors for RX HIGH channel */
        /****************************************/
        ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
        if (ret) {
                dev_err(wcn->dev, "Error allocating descriptor\n");
                goto out_err_rxh_ch;
        }

        /* For RX we need to preallocate buffers */
        wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

        /* Write channel head to a NEXT register */
        wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
                wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

        /* Write DMA source address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_SRC_ADDR_RX_H,
                WCN36XX_DXE_WQ_RX_H);

        /* Program preallocated destination address */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_CH_DEST_ADDR_RX_H,
                wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

        /* Enable default control registers */
        wcn36xx_dxe_write_register(wcn,
                WCN36XX_DXE_REG_CTL_RX_H,
                WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

        /* Enable channel interrupts */
        wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

        ret = wcn36xx_dxe_request_irqs(wcn);
        if (ret < 0)
                goto out_err_irq;

        timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);

        return 0;

out_err_irq:
        wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
        wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
        wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
        wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);

        return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
        free_irq(wcn->tx_irq, wcn);
        free_irq(wcn->rx_irq, wcn);
        del_timer(&wcn->tx_ack_timer);

        if (wcn->tx_ack_skb) {
                ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
                wcn->tx_ack_skb = NULL;
        }

        wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
        wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}