linux/drivers/net/wireless/mediatek/mt76/sdio.c
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 *         Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"

/* Allocate the entry array for an RX queue and reset its ring state. */
static int
mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
        struct mt76_queue *q = &dev->q_rx[qid];

        spin_lock_init(&q->lock);
        q->entry = devm_kcalloc(dev->dev,
                                MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        q->ndesc = MT_NUM_RX_ENTRIES;
        q->head = q->tail = 0;
        q->queued = 0;

        return 0;
}

/* Allocate a TX queue and its entry array as device-managed memory. */
static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
        struct mt76_queue *q;

        q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
        if (!q)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&q->lock);
        q->entry = devm_kcalloc(dev->dev,
                                MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return ERR_PTR(-ENOMEM);

        q->ndesc = MT_NUM_TX_ENTRIES;

        return q;
}

/*
 * Allocate one TX queue per data queue up to MT_TXQ_PSD, plus a dedicated
 * queue for MCU (firmware) commands.
 */
static int mt76s_alloc_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i;

        for (i = 0; i <= MT_TXQ_PSD; i++) {
                q = mt76s_alloc_tx_queue(dev);
                if (IS_ERR(q))
                        return PTR_ERR(q);

                q->qid = i;
                dev->phy.q_tx[i] = q;
        }

        q = mt76s_alloc_tx_queue(dev);
        if (IS_ERR(q))
                return PTR_ERR(q);

        q->qid = MT_MCUQ_WM;
        dev->q_mcu[MT_MCUQ_WM] = q;

        return 0;
}

/* Set up all RX and TX rings used by the SDIO bus glue. */
int mt76s_alloc_queues(struct mt76_dev *dev)
{
        int err;

        err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
        if (err < 0)
                return err;

        return mt76s_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76s_alloc_queues);

/* Dequeue the oldest filled entry from an RX ring, or NULL if it is empty. */
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
        struct mt76_queue_entry *e = NULL;

        spin_lock_bh(&q->lock);
        if (q->queued > 0) {
                e = &q->entry[q->tail];
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
        }
        spin_unlock_bh(&q->lock);

        return e;
}

/*
 * Pass completed RX skbs up to the driver and signal RX poll completion
 * once the main queue has been drained.
 */
static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        int qid = q - &dev->q_rx[MT_RXQ_MAIN];
        int nframes = 0;

        while (true) {
                struct mt76_queue_entry *e;

                if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
                        break;

                e = mt76s_get_next_rx_entry(q);
                if (!e || !e->skb)
                        break;

                dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
                e->skb = NULL;
                nframes++;
        }
        if (qid == MT_RXQ_MAIN)
                mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

        return nframes;
}

/* RX worker: keep draining the RX queues until a pass completes no frames. */
static void mt76s_net_worker(struct mt76_worker *w)
{
        struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
                                              net_worker);
        struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
        int i, nframes;

        do {
                nframes = 0;

                local_bh_disable();
                rcu_read_lock();

                mt76_for_each_q_rx(dev, i)
                        nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

                rcu_read_unlock();
                local_bh_enable();
        } while (nframes > 0);
}

/*
 * Reclaim TX entries the txrx worker has marked done. MCU frames are freed
 * here directly; data frames keep their skb so mt76_queue_tx_complete() can
 * report tx status for them.
 */
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct mt76_queue_entry entry;
        int nframes = 0;
        bool mcu;

        if (!q)
                return 0;

        mcu = q == dev->q_mcu[MT_MCUQ_WM];
        while (q->queued > 0) {
                if (!q->entry[q->tail].done)
                        break;

                entry = q->entry[q->tail];
                q->entry[q->tail].done = false;

                if (mcu) {
                        dev_kfree_skb(entry.skb);
                        entry.skb = NULL;
                }

                mt76_queue_tx_complete(dev, q, &entry);
                nframes++;
        }

        if (!q->queued)
                wake_up(&dev->tx_wait);

        return nframes;
}

/*
 * TX completion worker: reclaim finished entries on the MCU and data queues,
 * trigger tx status readout, and reschedule the txrx worker if data frames
 * completed (i.e. ring space was freed).
 */
static void mt76s_status_worker(struct mt76_worker *w)
{
        struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
                                              status_worker);
        struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
        bool resched = false;
        int i, nframes;

        do {
                int ndata_frames = 0;

                nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

                for (i = 0; i <= MT_TXQ_PSD; i++)
                        ndata_frames += mt76s_process_tx_queue(dev,
                                                               dev->phy.q_tx[i]);
                nframes += ndata_frames;
                if (ndata_frames > 0)
                        resched = true;

                if (dev->drv->tx_status_data &&
                    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
                        queue_work(dev->wq, &dev->sdio.stat_work);
        } while (nframes > 0);

        if (resched)
                mt76_worker_schedule(&dev->sdio.txrx_worker);
}

/* Read pending tx status reports from the device until none are left. */
static void mt76s_tx_status_data(struct work_struct *work)
{
        struct mt76_sdio *sdio;
        struct mt76_dev *dev;
        u8 update = 1;
        u16 count = 0;

        sdio = container_of(work, struct mt76_sdio, stat_work);
        dev = container_of(sdio, struct mt76_dev, sdio);

        while (true) {
                if (test_bit(MT76_REMOVED, &dev->phy.state))
                        break;

                if (!dev->drv->tx_status_data(dev, &update))
                        break;
                count++;
        }

        if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
                queue_work(dev->wq, &sdio->stat_work);
        else
                clear_bit(MT76_READING_STATS, &dev->phy.state);
}

/*
 * Enqueue a data frame: let the driver build its TX descriptor, then push
 * the skb onto the ring. The caller in the mt76 core holds q->lock.
 */
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                   struct sk_buff *skb, struct mt76_wcid *wcid,
                   struct ieee80211_sta *sta)
{
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
        int err, len = skb->len;
        u16 idx = q->head;

        if (q->queued == q->ndesc)
                return -ENOSPC;

        skb->prev = skb->next = NULL;
        err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
        if (err < 0)
                return err;

        q->entry[q->head].skb = tx_info.skb;
        q->entry[q->head].buf_sz = len;
        q->entry[q->head].wcid = 0xffff;

        /* make the entry visible before updating the ring head */
        smp_wmb();

        q->head = (q->head + 1) % q->ndesc;
        q->queued++;

        return idx;
}

/*
 * Enqueue a raw (MCU command) frame: pad it to a 4-byte boundary and push it
 * onto the ring under q->lock. The skb is consumed even on failure.
 */
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
                       struct sk_buff *skb, u32 tx_info)
{
        int ret = -ENOSPC, len = skb->len, pad;

        if (q->queued == q->ndesc)
                goto error;

        pad = round_up(skb->len, 4) - skb->len;
        ret = mt76_skb_adjust_pad(skb, pad);
        if (ret)
                goto error;

        spin_lock_bh(&q->lock);

        q->entry[q->head].buf_sz = len;
        q->entry[q->head].skb = skb;
        q->head = (q->head + 1) % q->ndesc;
        q->queued++;

        spin_unlock_bh(&q->lock);

        return 0;

error:
        dev_kfree_skb(skb);

        return ret;
}

/* Wake the txrx worker to flush newly queued frames out over SDIO. */
static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct mt76_sdio *sdio = &dev->sdio;

        mt76_worker_schedule(&sdio->txrx_worker);
}

static const struct mt76_queue_ops sdio_queue_ops = {
        .tx_queue_skb = mt76s_tx_queue_skb,
        .kick = mt76s_tx_kick,
        .tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};

/*
 * Tear down the SDIO bus glue: stop the workers, flush pending tx status
 * work, release the SDIO interrupt and free any skbs still sitting on the
 * RX rings.
 */
void mt76s_deinit(struct mt76_dev *dev)
{
        struct mt76_sdio *sdio = &dev->sdio;
        int i;

        mt76_worker_teardown(&sdio->txrx_worker);
        mt76_worker_teardown(&sdio->status_worker);
        mt76_worker_teardown(&sdio->net_worker);

        cancel_work_sync(&sdio->stat_work);
        clear_bit(MT76_READING_STATS, &dev->phy.state);

        mt76_tx_status_check(dev, NULL, true);

        sdio_claim_host(sdio->func);
        sdio_release_irq(sdio->func);
        sdio_release_host(sdio->func);

        mt76_for_each_q_rx(dev, i) {
                struct mt76_queue *q = &dev->q_rx[i];
                int j;

                for (j = 0; j < q->ndesc; j++) {
                        struct mt76_queue_entry *e = &q->entry[j];

                        if (!e->skb)
                                continue;

                        dev_kfree_skb(e->skb);
                        e->skb = NULL;
                }
        }
}
EXPORT_SYMBOL_GPL(mt76s_deinit);

/*
 * Set up the status and net workers and hook up the SDIO queue and bus ops.
 * The txrx worker itself is created by the chip-specific driver.
 */
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
               const struct mt76_bus_ops *bus_ops)
{
        struct mt76_sdio *sdio = &dev->sdio;
        int err;

        err = mt76_worker_setup(dev->hw, &sdio->status_worker,
                                mt76s_status_worker, "sdio-status");
        if (err)
                return err;

        err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
                                "sdio-net");
        if (err)
                return err;

        sched_set_fifo_low(sdio->status_worker.task);
        sched_set_fifo_low(sdio->net_worker.task);

        INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);

        dev->queue_ops = &sdio_queue_ops;
        dev->bus = bus_ops;
        dev->sdio.func = func;

        return 0;
}
EXPORT_SYMBOL_GPL(mt76s_init);

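/*
 * Rough probe-time usage sketch for a chip driver built on this glue. This
 * is a hypothetical illustration, not code from this file: my_bus_ops and
 * my_txrx_worker stand in for the chip driver's register-access ops and
 * txrx worker, and the real mt7663s probe path does more setup than shown.
 *
 *	err = mt76s_init(mdev, func, &my_bus_ops);
 *	if (err < 0)
 *		return err;
 *
 *	err = mt76_worker_setup(mdev->hw, &mdev->sdio.txrx_worker,
 *				my_txrx_worker, "sdio-txrx");
 *	if (err)
 *		return err;
 *
 *	err = mt76s_alloc_queues(mdev);
 *	if (err)
 *		goto error;
 */
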
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");