linux/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
   1// SPDX-License-Identifier: ISC
   2/* Copyright (C) 2020 MediaTek Inc. */
   3
   4#include "mt7921.h"
   5#include "../dma.h"
   6#include "mac.h"
   7
   8int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
   9{
  10        int i, err;
  11
  12        err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
  13        if (err < 0)
  14                return err;
  15
  16        for (i = 0; i <= MT_TXQ_PSD; i++)
  17                phy->mt76->q_tx[i] = phy->mt76->q_tx[0];
  18
  19        return 0;
  20}
  21
/* NAPI poll handler for TX completions.
 *
 * If the chip is runtime-suspended (pm_ref fails), the poll cannot touch
 * the hardware: complete NAPI immediately and schedule the wake worker,
 * which will resume the device and re-trigger processing.
 */
static int mt7921_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7921_dev *dev;

	dev = container_of(napi, struct mt7921_dev, mt76.tx_napi);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	mt7921_mcu_tx_cleanup(dev);
	/* re-enable the TX done interrupt only if NAPI really completed */
	if (napi_complete(napi))
		mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return 0;
}
  41
/* NAPI poll handler for RX rings.
 *
 * Takes a runtime-PM reference around the generic mt76 DMA RX poll; when
 * the device is asleep, defer by completing NAPI and queueing the wake
 * worker instead of touching the hardware.
 */
static int mt7921_poll_rx(struct napi_struct *napi, int budget)
{
	struct mt7921_dev *dev;
	int done;

	dev = container_of(napi->dev, struct mt7921_dev, mt76.napi_dev);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}
	done = mt76_dma_rx_poll(napi, budget);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return done;
}
  59
  60static void mt7921_dma_prefetch(struct mt7921_dev *dev)
  61{
  62#define PREFETCH(base, depth)   ((base) << 16 | (depth))
  63
  64        mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
  65        mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
  66        mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
  67        mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
  68        mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));
  69
  70        mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
  71        mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
  72        mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
  73        mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
  74        mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
  75        mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
  76        mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
  77        mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
  78        mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
  79}
  80
  81static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
  82{
  83        static const struct {
  84                u32 phys;
  85                u32 mapped;
  86                u32 size;
  87        } fixed_map[] = {
  88                { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
  89                { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
  90                { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
  91                { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
  92                { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
  93                { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
  94                { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
  95                { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
  96                { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
  97                { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
  98                { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
  99                { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
 100                { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
 101                { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
 102                { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
 103                { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
 104                { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
 105                { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
 106                { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
 107                { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
 108                { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
 109                { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
 110                { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
 111                { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
 112                { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
 113                { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
 114                { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
 115                { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
 116                { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
 117                { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
 118                { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
 119                { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
 120                { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
 121                { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
 122                { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
 123                { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
 124                { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
 125                { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
 126                { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
 127                { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
 128                { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
 129                { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
 130                { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
 131        };
 132        int i;
 133
 134        if (addr < 0x100000)
 135                return addr;
 136
 137        for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
 138                u32 ofs;
 139
 140                if (addr < fixed_map[i].phys)
 141                        continue;
 142
 143                ofs = addr - fixed_map[i].phys;
 144                if (ofs > fixed_map[i].size)
 145                        continue;
 146
 147                return fixed_map[i].mapped + ofs;
 148        }
 149
 150        if ((addr >= 0x18000000 && addr < 0x18c00000) ||
 151            (addr >= 0x70000000 && addr < 0x78000000) ||
 152            (addr >= 0x7c000000 && addr < 0x7c400000))
 153                return mt7921_reg_map_l1(dev, addr);
 154
 155        dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
 156                addr);
 157
 158        return 0;
 159}
 160
 161static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
 162{
 163        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
 164        u32 addr = __mt7921_reg_addr(dev, offset);
 165
 166        return dev->bus_ops->rr(mdev, addr);
 167}
 168
 169static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
 170{
 171        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
 172        u32 addr = __mt7921_reg_addr(dev, offset);
 173
 174        dev->bus_ops->wr(mdev, addr, val);
 175}
 176
 177static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
 178{
 179        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
 180        u32 addr = __mt7921_reg_addr(dev, offset);
 181
 182        return dev->bus_ops->rmw(mdev, addr, mask, val);
 183}
 184
/* Disable the WFDMA0 engine and wait for it to go idle.
 *
 * @force: additionally pulse the DMASHDL/logic reset lines first (used
 *	   on initial bring-up and full-recovery paths).
 *
 * Returns 0 on success, -ETIMEDOUT if TX/RX DMA never reports idle.
 */
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
	if (force) {
		/* reset */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	/* disable dmashdl and force bypass mode */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* poll until both DMA engines drop their busy flags */
	if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG,
		       MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
		       MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
		return -ETIMEDOUT;

	return 0;
}
 218
/* Enable the WFDMA0 engine: prefetch config, index reset, global config,
 * and interrupt unmasking. Counterpart of mt7921_dma_disable().
 *
 * Always returns 0.
 */
static int mt7921_dma_enable(struct mt7921_dev *dev)
{
	/* configure prefetch settings */
	mt7921_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);

	/* configure delay interrupt (disabled) */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* start TX/RX DMA only after the rest of the config is in place */
	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt7921_irq_enable(dev,
			  MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			  MT_INT_MCU_CMD);
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}
 251
/* Full DMA reset: stop the engine, reset every TX/MCU/RX ring, flush
 * pending TX status, then bring DMA back up.
 *
 * @force: forwarded to mt7921_dma_disable() to also pulse the reset lines.
 */
static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
{
	int i, err;

	err = mt7921_dma_disable(dev, force);
	if (err)
		return err;

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	/* flush any outstanding TX status entries */
	mt76_tx_status_check(&dev->mt76, true);

	return mt7921_dma_enable(dev);
}
 274
/* Reset the WiFi subsystem by toggling the WFSYS software reset line and
 * waiting (up to 500 ms) for the init-done flag.
 *
 * Returns 0 on success, -ETIMEDOUT if the subsystem never signals done.
 */
int mt7921_wfsys_reset(struct mt7921_dev *dev)
{
	mt76_clear(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
	/* hold the subsystem in reset briefly before releasing it */
	msleep(50);
	mt76_set(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);

	if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
			      WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
		return -ETIMEDOUT;

	return 0;
}
 287
/* WPDMA recovery: drain all TX/MCU/RX queues, optionally reset the whole
 * WiFi subsystem, re-run the DMA reset, then refill the RX rings.
 *
 * @force: when true, a full WFSYS reset precedes the DMA reset.
 */
int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt7921_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt7921_dma_reset(dev, force);
	if (err)
		return err;

	/* repopulate RX rings now that DMA is back up */
	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}
 316
/* Conditionally reinitialize WPDMA after a deep-sleep wakeup.
 *
 * Only acts when the chip flags (via mt7921_dma_need_reinit()) that its
 * DMA state was lost; in that case interrupts are masked, a non-forced
 * WPDMA reset is performed, and the low-power wake counter is bumped.
 */
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
{
	struct mt76_connac_pm *pm = &dev->pm;
	int err;

	/* check if the wpdma must be reinitialized */
	if (mt7921_dma_need_reinit(dev)) {
		/* disable interrupts */
		mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

		err = mt7921_wpdma_reset(dev, false);
		if (err) {
			dev_err(dev->mt76.dev, "wpdma reset failed\n");
			return err;
		}

		/* enable interrupts */
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		pm->stats.lp_wake++;
	}

	return 0;
}
 341
/* One-time DMA initialization at probe.
 *
 * Installs remapping bus-op wrappers (rr/wr/rmw go through
 * __mt7921_reg_addr), resets the DMA engine and WiFi subsystem, then
 * allocates every TX/MCU/RX ring and sets up NAPI before enabling DMA.
 *
 * Returns 0 on success or a negative errno from any allocation/reset step.
 */
int mt7921_dma_init(struct mt7921_dev *dev)
{
	struct mt76_bus_ops *bus_ops;
	int ret;

	dev->phy.dev = dev;
	dev->phy.mt76 = &dev->mt76.phy;
	dev->mt76.phy.priv = &dev->phy;
	/* keep the original bus ops so the wrappers can chain to them */
	dev->bus_ops = dev->mt76.bus;
	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
			       GFP_KERNEL);
	if (!bus_ops)
		return -ENOMEM;

	bus_ops->rr = mt7921_rr;
	bus_ops->wr = mt7921_wr;
	bus_ops->rmw = mt7921_rmw;
	dev->mt76.bus = bus_ops;

	mt76_dma_attach(&dev->mt76);

	ret = mt7921_dma_disable(dev, true);
	if (ret)
		return ret;

	ret = mt7921_wfsys_reset(dev);
	if (ret)
		return ret;

	/* init tx queue */
	ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
				    MT7921_TX_RING_SIZE);
	if (ret)
		return ret;

	/* prefetch depth for the band-0 TX ring */
	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4);

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7921_TXQ_MCU_WM,
				  MT7921_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7921_TXQ_FWDL,
				  MT7921_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	/* event from WM before firmware download */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT7921_RXQ_MCU_WM,
			       MT7921_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
	if (ret)
		return ret;

	/* Change mcu queue after firmware download */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT7921_RXQ_MCU_WM,
			       MT7921_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_WFDMA0(0x540));
	if (ret)
		return ret;

	/* rx data */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev, mt7921_poll_rx);
	if (ret < 0)
		return ret;

	/* register and enable the TX-completion NAPI context */
	netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7921_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return mt7921_dma_enable(dev);
}
 424
/* Teardown counterpart of mt7921_dma_init(): stop WFDMA0, pulse the
 * reset lines, and free all mt76 DMA resources.
 */
void mt7921_dma_cleanup(struct mt7921_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* reset */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	mt76_dma_cleanup(&dev->mt76);
}
 447