linux/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt7915.h"
#include "../dma.h"
#include "mac.h"

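/*
 * TX queue setup: a single host TX ring is allocated for the band and
 * every mac80211 queue up to MT_TXQ_PSD is pointed at that one ring,
 * i.e. per-AC scheduling is not done with separate host rings here.
 */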
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
{
        int i, err;

        err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
        if (err < 0)
                return err;

        for (i = 0; i <= MT_TXQ_PSD; i++)
                phy->mt76->q_tx[i] = phy->mt76->q_tx[0];

        return 0;
}

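/*
 * RX dispatch: the first RX descriptor word (RXD0) carries the packet
 * type. TX-free notifications and MCU events are consumed here, normal
 * data frames are handed to mt76_rx(), and anything unhandled (or a
 * frame rejected by mt7915_mac_fill_rx()) is dropped.
 */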
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
                         struct sk_buff *skb)
{
        struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
        __le32 *rxd = (__le32 *)skb->data;
        enum rx_pkt_type type;

        type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));

        switch (type) {
        case PKT_TYPE_TXRX_NOTIFY:
                mt7915_mac_tx_free(dev, skb);
                break;
        case PKT_TYPE_RX_EVENT:
                mt7915_mcu_rx_event(dev, skb);
                break;
#ifdef CONFIG_NL80211_TESTMODE
        case PKT_TYPE_TXRXV:
                mt7915_mac_fill_rx_vector(dev, skb);
                break;
#endif
        case PKT_TYPE_NORMAL:
                if (!mt7915_mac_fill_rx(dev, skb)) {
                        mt76_rx(&dev->mt76, q, skb);
                        return;
                }
                fallthrough;
        default:
                dev_kfree_skb(skb);
                break;
        }
}

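/*
 * TX completion: the TX NAPI handler only reaps the two MCU command
 * rings (WM and WA) and then re-arms the MCU TX-done interrupt.
 * Data-frame completions arrive as PKT_TYPE_TXRX_NOTIFY events and are
 * handled in mt7915_queue_rx_skb() above.
 */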
static void
mt7915_tx_cleanup(struct mt7915_dev *dev)
{
        mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
        mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
}

static int mt7915_poll_tx(struct napi_struct *napi, int budget)
{
        struct mt7915_dev *dev;

        dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);

        mt7915_tx_cleanup(dev);

        if (napi_complete_done(napi, 0))
                mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);

        return 0;
}

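/*
 * Per-ring prefetch configuration: PREFETCH(base, depth) packs the base
 * value into the upper 16 bits of the ring's EXT_CTRL register and the
 * prefetch depth into the lower bits. The same layout is programmed
 * again at the PCIE1 register offset when a second host interface
 * (hif2) is present.
 */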
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
#define PREFETCH(base, depth)   ((base) << 16 | (depth))

        mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x0, 0x4));
        mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x40, 0x4));
        mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x80, 0x0));

        mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL + ofs, PREFETCH(0x80, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL + ofs, PREFETCH(0xc0, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL + ofs, PREFETCH(0x100, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL + ofs, PREFETCH(0x140, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL + ofs, PREFETCH(0x180, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL + ofs, PREFETCH(0x1c0, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL + ofs, PREFETCH(0x200, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL + ofs, PREFETCH(0x240, 0x4));

        mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL + ofs, PREFETCH(0x280, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL + ofs, PREFETCH(0x2c0, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL + ofs, PREFETCH(0x300, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL + ofs, PREFETCH(0x340, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL + ofs, PREFETCH(0x380, 0x4));
        mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x0));

        mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x4));
        mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x400, 0x4));
        mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x440, 0x4));
        mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL + ofs, PREFETCH(0x480, 0x0));
}

void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
        __mt7915_dma_prefetch(dev, 0);
        if (dev->hif2)
                __mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
}

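/*
 * Register address translation: offsets below 0x100000 map directly onto
 * the PCIe BAR. Addresses covered by fixed_map are translated to their
 * statically mapped windows; everything else falls back to the dynamic
 * L1/L2 remapping helpers mt7915_reg_map_l1()/mt7915_reg_map_l2().
 */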
static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
{
        static const struct {
                u32 phys;
                u32 mapped;
                u32 size;
        } fixed_map[] = {
                { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
                { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
                { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
                { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
                { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
                { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
                { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
                { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
                { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
                { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
                { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
                { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
                { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
                { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
                { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
                { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
                { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
                { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
                { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
                { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
                { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
                { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
                { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
                { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
                { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
                { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
                { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
                { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
                { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
                { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
                { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
                { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
                { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
                { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
                { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
                { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
                { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
                { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
        };
        int i;

        if (addr < 0x100000)
                return addr;

        for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
                u32 ofs;

                if (addr < fixed_map[i].phys)
                        continue;

                ofs = addr - fixed_map[i].phys;
                if (ofs > fixed_map[i].size)
                        continue;

                return fixed_map[i].mapped + ofs;
        }

        if ((addr >= 0x18000000 && addr < 0x18c00000) ||
            (addr >= 0x70000000 && addr < 0x78000000) ||
            (addr >= 0x7c000000 && addr < 0x7c400000))
                return mt7915_reg_map_l1(dev, addr);

        return mt7915_reg_map_l2(dev, addr);
}

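/*
 * Bus op wrappers: every register access is run through
 * __mt7915_reg_addr() first, so callers can use chip addresses while the
 * original bus ops (saved in dev->bus_ops) only see translated offsets.
 */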
static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
{
        struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
        u32 addr = __mt7915_reg_addr(dev, offset);

        return dev->bus_ops->rr(mdev, addr);
}

static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
        struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
        u32 addr = __mt7915_reg_addr(dev, offset);

        dev->bus_ops->wr(mdev, addr, val);
}

static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
        struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
        u32 addr = __mt7915_reg_addr(dev, offset);

        return dev->bus_ops->rmw(mdev, addr, mask, val);
}

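/*
 * DMA bring-up order: install the remapping bus ops, program the global
 * WFDMA settings and prefetch, allocate the TX/MCU/RX rings, start the
 * TX NAPI instance, wait for the host interface to go idle, and only
 * then enable the TX/RX DMA engines and the ring interrupts.
 */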
int mt7915_dma_init(struct mt7915_dev *dev)
{
        /* Increase buffer size to receive large VHT/HE MPDUs */
        struct mt76_bus_ops *bus_ops;
        int rx_buf_size = MT_RX_BUF_SIZE * 2;
        u32 hif1_ofs = 0;
        int ret;

        dev->bus_ops = dev->mt76.bus;
        bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
                               GFP_KERNEL);
        if (!bus_ops)
                return -ENOMEM;

        bus_ops->rr = mt7915_rr;
        bus_ops->wr = mt7915_wr;
        bus_ops->rmw = mt7915_rmw;
        dev->mt76.bus = bus_ops;

        mt76_dma_attach(&dev->mt76);

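        /*
         * With a second host interface (hif2), the same WFDMA blocks exist
         * again at the PCIE1 register base; hif1_ofs is the offset added to
         * reach that second copy.
         */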
        if (dev->hif2)
                hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;

        /* configure global setting */
        mt76_set(dev, MT_WFDMA1_GLO_CFG,
                 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
                 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

        /* reset dma idx */
        mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
        mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);

        /* configure delay interrupt */
        mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
        mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);

        if (dev->hif2) {
                mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
                         MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
                         MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

                mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
                mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);

                mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
                mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
        }

        /* configure prefetch settings */
        mt7915_dma_prefetch(dev);

        /* init tx queue */
        ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
                                    MT7915_TX_RING_SIZE);
        if (ret)
                return ret;

        /* command to WM */
        ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7915_TXQ_MCU_WM,
                                  MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
        if (ret)
                return ret;

        /* command to WA */
        ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, MT7915_TXQ_MCU_WA,
                                  MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
        if (ret)
                return ret;

        /* firmware download */
        ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7915_TXQ_FWDL,
                                  MT7915_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
        if (ret)
                return ret;

        /* event from WM */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
                               MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
                               rx_buf_size, MT_RX_EVENT_RING_BASE);
        if (ret)
                return ret;

        /* event from WA */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
                               MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
                               rx_buf_size, MT_RX_EVENT_RING_BASE);
        if (ret)
                return ret;

        /* rx data queue */
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
                               MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
                               rx_buf_size, MT_RX_DATA_RING_BASE);
        if (ret)
                return ret;

        if (dev->dbdc_support) {
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
                                       MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
                                       rx_buf_size,
                                       MT_RX_DATA_RING_BASE + hif1_ofs);
                if (ret)
                        return ret;

                /* event from WA */
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
                                       MT7915_RXQ_MCU_WA_EXT,
                                       MT7915_RX_MCU_RING_SIZE,
                                       rx_buf_size,
                                       MT_RX_EVENT_RING_BASE + hif1_ofs);
                if (ret)
                        return ret;
        }

        ret = mt76_init_queues(dev);
        if (ret < 0)
                return ret;

        netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
                          mt7915_poll_tx, NAPI_POLL_WEIGHT);
        napi_enable(&dev->mt76.tx_napi);

        /* wait for WFDMA (host interface) to become idle */
        mt76_set(dev, MT_WFDMA0_BUSY_ENA,
                 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
                 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
                 MT_WFDMA0_BUSY_ENA_RX_FIFO);

        mt76_set(dev, MT_WFDMA1_BUSY_ENA,
                 MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
                 MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
                 MT_WFDMA1_BUSY_ENA_RX_FIFO);

        mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
                 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
                 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
                 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

        mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
                 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
                 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
                 MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);

        mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
                  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

        /* set WFDMA Tx/Rx */
        mt76_set(dev, MT_WFDMA0_GLO_CFG,
                 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
        mt76_set(dev, MT_WFDMA1_GLO_CFG,
                 MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);

        if (dev->hif2) {
                mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
                         (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
                          MT_WFDMA0_GLO_CFG_RX_DMA_EN));
                mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
                         (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
                          MT_WFDMA1_GLO_CFG_RX_DMA_EN));
                mt76_set(dev, MT_WFDMA_HOST_CONFIG,
                         MT_WFDMA_HOST_CONFIG_PDMA_BAND);
        }

        /* enable interrupts for TX/RX rings */
        mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
                          MT_INT_MCU_CMD);

        return 0;
}

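/*
 * Teardown: stop the TX/RX DMA engines, pulse the WFDMA logic/DMASHDL
 * reset bits (clear then set), and free the host rings via
 * mt76_dma_cleanup().
 */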
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
        /* disable */
        mt76_clear(dev, MT_WFDMA0_GLO_CFG,
                   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
                   MT_WFDMA0_GLO_CFG_RX_DMA_EN);
        mt76_clear(dev, MT_WFDMA1_GLO_CFG,
                   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
                   MT_WFDMA1_GLO_CFG_RX_DMA_EN);

        /* reset */
        mt76_clear(dev, MT_WFDMA1_RST,
                   MT_WFDMA1_RST_DMASHDL_ALL_RST |
                   MT_WFDMA1_RST_LOGIC_RST);

        mt76_set(dev, MT_WFDMA1_RST,
                 MT_WFDMA1_RST_DMASHDL_ALL_RST |
                 MT_WFDMA1_RST_LOGIC_RST);

        mt76_clear(dev, MT_WFDMA0_RST,
                   MT_WFDMA0_RST_DMASHDL_ALL_RST |
                   MT_WFDMA0_RST_LOGIC_RST);

        mt76_set(dev, MT_WFDMA0_RST,
                 MT_WFDMA0_RST_DMASHDL_ALL_RST |
                 MT_WFDMA0_RST_LOGIC_RST);

        mt76_dma_cleanup(&dev->mt76);
}