linux/drivers/net/wireless/mediatek/mt76/tx.c
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"

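/*
 * Allocate a new TX descriptor (txwi) cache entry, rounded up to a full
 * cache line, and map its txwi buffer for DMA towards the device.
 */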
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        int size;

        size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
        t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
        if (!t)
                return NULL;

        addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
                              DMA_TO_DEVICE);
        t->dma_addr = addr;

        return t;
}

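/* Take a txwi entry from the free list, or return NULL if it is empty. */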
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock_bh(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock_bh(&dev->lock);

        return t;
}

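/* Get a txwi entry, reusing a cached one when possible. */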
static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

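/* Return a txwi entry to the free list for later reuse. */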
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock_bh(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock_bh(&dev->lock);
}

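/*
 * Drain the txwi cache and unmap each entry. The structs themselves are
 * devm-allocated, so their memory is released together with the device.
 */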
void mt76_tx_free(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        while ((t = __mt76_get_txwi(dev)) != NULL)
                dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
                                 DMA_TO_DEVICE);
}

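/* Map a txq to a TX queue id: the access category for station traffic, BE otherwise. */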
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}

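/*
 * Queue one frame on a hardware queue: let the driver build the txwi,
 * DMA-map the skb head and all fragments, check for ring space, and hand
 * the buffer list to the queue backend. On failure the mappings are undone
 * and the frame is completed through the driver's tx_complete_skb hook.
 */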
int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                      struct sk_buff *skb, struct mt76_wcid *wcid,
                      struct ieee80211_sta *sta)
{
        struct mt76_queue_entry e;
        struct mt76_txwi_cache *t;
        struct mt76_queue_buf buf[32];
        struct sk_buff *iter;
        dma_addr_t addr;
        int len;
        u32 tx_info = 0;
        int n, ret;

        t = mt76_get_txwi(dev);
        if (!t) {
                ieee80211_free_txskb(dev->hw, skb);
                return -ENOMEM;
        }

        dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
                                DMA_TO_DEVICE);
        ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
                                       &tx_info);
        dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
                                   DMA_TO_DEVICE);
        if (ret < 0)
                goto free;

        len = skb->len - skb->data_len;
        addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev->dev, addr)) {
                ret = -ENOMEM;
                goto free;
        }

        n = 0;
        buf[n].addr = t->dma_addr;
        buf[n++].len = dev->drv->txwi_size;
        buf[n].addr = addr;
        buf[n++].len = len;

        skb_walk_frags(skb, iter) {
                if (n == ARRAY_SIZE(buf))
                        goto unmap;

                addr = dma_map_single(dev->dev, iter->data, iter->len,
                                      DMA_TO_DEVICE);
                if (dma_mapping_error(dev->dev, addr))
                        goto unmap;

                buf[n].addr = addr;
                buf[n++].len = iter->len;
        }

        if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
                goto unmap;

        return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);

unmap:
        ret = -ENOMEM;
        for (n--; n > 0; n--)
                dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
                                 DMA_TO_DEVICE);

free:
        e.skb = skb;
        e.txwi = t;
        dev->drv->tx_complete_skb(dev, q, &e, true);
        mt76_put_txwi(dev, t);
        return ret;
}
EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);

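/*
 * TX entry point for frames handed down by mac80211. Fills in the TX rate
 * when no per-station rate has been set, queues the frame on the matching
 * hardware queue, kicks the queue, and stops the mac80211 queue when the
 * ring is close to full.
 */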
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_queue *q;
        int qid = skb_get_queue_mapping(skb);

        if (WARN_ON(qid >= MT_TXQ_PSD)) {
                qid = MT_TXQ_BE;
                skb_set_queue_mapping(skb, qid);
        }

        if (!wcid->tx_rate_set)
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        q = &dev->q_tx[qid];

        spin_lock_bh(&q->lock);
        mt76_tx_queue_skb(dev, q, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);

        if (q->queued > q->ndesc - 8)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

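/*
 * Pull the next frame for a txq, preferring the local retry queue over
 * mac80211's queue. When draining for power-save delivery (ps == true),
 * clear the buffered-frames indication once the retry queue runs empty.
 */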
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct sk_buff *skb;

        skb = skb_dequeue(&mtxq->retry_q);
        if (skb) {
                u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

                if (ps && skb_queue_empty(&mtxq->retry_q))
                        ieee80211_sta_set_buffered(txq->sta, tid, false);

                return skb;
        }

        return ieee80211_tx_dequeue(dev->hw, txq);
}

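/*
 * Track the expected next sequence number of an aggregation session, so a
 * BAR with the right SSN can be sent when the queue is stopped.
 */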
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (!ieee80211_is_data_qos(hdr->frame_control))
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

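/*
 * Queue one frame on the power-save delivery queue, marking it as a PS
 * response and setting the EOSP / more-data bits according to whether it
 * is the last frame of the service period.
 */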
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP;

        mt76_skb_set_moredata(skb, !last);
        mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
}

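/*
 * Release frames buffered for a station in power save. Frames are pulled
 * from the requested TIDs and sent on the PSD queue; the last frame is
 * held back so it can be tagged as the end of the service period.
 */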
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_dev *dev = hw->priv;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
                struct sk_buff *skb;

                if (!(tids & 1))
                        continue;

                do {
                        skb = mt76_txq_dequeue(dev, mtxq, true);
                        if (!skb)
                                break;

                        if (mtxq->aggr)
                                mt76_check_agg_ssn(mtxq, skb);

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(dev, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        }
        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

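/*
 * Send a burst of frames from one software txq. The first frame decides
 * the rate and whether the burst is treated as A-MPDU; follow-up frames
 * that do not match (or rate sampling probes) are pushed back onto the
 * retry queue. Returns the number of queued frames or a negative error.
 */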
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
                    struct mt76_txq *mtxq, bool *empty)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct ieee80211_tx_info *info;
        struct mt76_wcid *wcid = mtxq->wcid;
        struct sk_buff *skb;
        int n_frames = 1, limit;
        struct ieee80211_tx_rate tx_rate;
        bool ampdu;
        bool probe;
        int idx;

        skb = mt76_txq_dequeue(dev, mtxq, false);
        if (!skb) {
                *empty = true;
                return 0;
        }

        info = IEEE80211_SKB_CB(skb);
        if (!wcid->tx_rate_set)
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);
        tx_rate = info->control.rates[0];

        probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
        ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
        limit = ampdu ? 16 : 3;

        if (ampdu)
                mt76_check_agg_ssn(mtxq, skb);

        idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);

        if (idx < 0)
                return idx;

        do {
                bool cur_ampdu;

                if (probe)
                        break;

                if (test_bit(MT76_SCANNING, &dev->state) ||
                    test_bit(MT76_RESET, &dev->state))
                        return -EBUSY;

                skb = mt76_txq_dequeue(dev, mtxq, false);
                if (!skb) {
                        *empty = true;
                        break;
                }

                info = IEEE80211_SKB_CB(skb);
                cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

                if (ampdu != cur_ampdu ||
                    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                        skb_queue_tail(&mtxq->retry_q, skb);
                        break;
                }

                info->control.rates[0] = tx_rate;

                if (cur_ampdu)
                        mt76_check_agg_ssn(mtxq, skb);

                idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
                if (idx < 0)
                        return idx;

                n_frames++;
        } while (n_frames < limit);

        if (!probe) {
                hwq->swq_queued++;
                hwq->entry[idx].schedule = true;
        }

        dev->queue_ops->kick(dev, hwq);

        return n_frames;
}

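/*
 * Round-robin over the software queues pending on a hardware queue,
 * sending one burst per txq. Pending block ack requests are sent first,
 * with the queue lock dropped around the call into mac80211.
 */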
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
        struct mt76_txq *mtxq, *mtxq_last;
        int len = 0;

restart:
        mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
        while (!list_empty(&hwq->swq)) {
                bool empty = false;
                int cur;

                mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        spin_unlock_bh(&hwq->lock);
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                        spin_lock_bh(&hwq->lock);
                        goto restart;
                }

                list_del_init(&mtxq->list);

                cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
                if (!empty)
                        list_add_tail(&mtxq->list, &hwq->swq);

                if (cur < 0)
                        return cur;

                len += cur;

                if (mtxq == mtxq_last)
                        break;
        }

        return len;
}

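/*
 * Refill a hardware queue from its software queues until four bursts are
 * pending or no software queues remain.
 */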
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
        int len;

        do {
                if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
                        break;

                len = mt76_txq_schedule_list(dev, hwq);
        } while (len > 0);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

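/* Run the TX scheduler on all four access category queues. */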
void mt76_txq_schedule_all(struct mt76_dev *dev)
{
        int i;

        for (i = 0; i <= MT_TXQ_BK; i++) {
                struct mt76_queue *q = &dev->q_tx[i];

                spin_lock_bh(&q->lock);
                mt76_txq_schedule(dev, q);
                spin_unlock_bh(&q->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

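/*
 * Detach a station's txqs from their hardware queues, optionally marking
 * aggregation sessions so a BAR is sent the next time they are scheduled.
 */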
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

                spin_lock_bh(&mtxq->hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                if (!list_empty(&mtxq->list))
                        list_del_init(&mtxq->list);
                spin_unlock_bh(&mtxq->hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

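/*
 * mac80211 wake_tx_queue hook: put the txq on its hardware queue's pending
 * list and run the scheduler.
 */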
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_dev *dev = hw->priv;
        struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
        struct mt76_queue *hwq = mtxq->hwq;

        spin_lock_bh(&hwq->lock);
        if (list_empty(&mtxq->list))
                list_add_tail(&mtxq->list, &hwq->swq);
        mt76_txq_schedule(dev, hwq);
        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

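/* Remove a txq from scheduling and free any frames left on its retry queue. */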
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq;
        struct mt76_queue *hwq;
        struct sk_buff *skb;

        if (!txq)
                return;

        mtxq = (struct mt76_txq *) txq->drv_priv;
        hwq = mtxq->hwq;

        spin_lock_bh(&hwq->lock);
        if (!list_empty(&mtxq->list))
                list_del(&mtxq->list);
        spin_unlock_bh(&hwq->lock);

        while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
                ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

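/* Initialize a txq's driver state and bind it to its hardware queue. */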
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

        INIT_LIST_HEAD(&mtxq->list);
        skb_queue_head_init(&mtxq->retry_q);

        mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);