linux/drivers/net/wireless/mediatek/mt7601u/dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

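/* Bounds-checked version of ieee80211_hdrlen(): returns 0 if the buffer is
 * shorter than the 10-byte minimum 802.11 header or if the header would
 * extend past the end of the buffer.
 */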
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data,
                                                  unsigned int len)
{
        const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
        unsigned int hdrlen;

        if (unlikely(len < 10))
                return 0;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (unlikely(hdrlen > len))
                return 0;
        return hdrlen;
}

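/* Convert one RX segment into an skb for mac80211.  Small frames are copied
 * into the skb head in full; with paged RX (p != NULL) only the 802.11
 * header plus 8 bytes are copied and the rest of the frame is attached as
 * a page fragment referencing the original RX buffer.
 */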
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
                        void *data, u32 seg_len, u32 truesize, struct page *p)
{
        struct sk_buff *skb;
        u32 true_len, hdr_len = 0, copy, frag;

        skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
        if (!true_len || true_len > seg_len)
                goto bad_frame;

        hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
        if (!hdr_len)
                goto bad_frame;

        if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
                skb_put_data(skb, data, hdr_len);

                data += hdr_len + 2;
                true_len -= hdr_len;
                hdr_len = 0;
        }

        /* If not doing paged RX, the allocated skb will always have enough
         * space.
         */
        copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
        frag = true_len - copy;

        skb_put_data(skb, data, copy);
        data += copy;

        if (frag) {
                skb_add_rx_frag(skb, 0, p, data - page_address(p),
                                frag, truesize);
                get_page(p);
        }

        return skb;

bad_frame:
        dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
                            true_len, hdr_len);
        dev_kfree_skb(skb);
        return NULL;
}

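/* Parse a single segment of an RX URB: strip the USB DMA header and the
 * RXWI, pick up the FCE info dword from the tail of the segment and pass
 * the frame to mac80211 under mac_lock.
 */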
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                                   u32 seg_len, struct page *p)
{
        struct sk_buff *skb;
        struct mt7601u_rxwi *rxwi;
        u32 fce_info, truesize = seg_len;

        /* The DMA_INFO field at the beginning of the segment contains only
         * some of the information; the rest has to be read from the FCE
         * descriptor at the end of the segment.
         */
        fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
        seg_len -= MT_FCE_INFO_LEN;

        data += MT_DMA_HDR_LEN;
        seg_len -= MT_DMA_HDR_LEN;

        rxwi = (struct mt7601u_rxwi *)data;
        data += sizeof(struct mt7601u_rxwi);
        seg_len -= sizeof(struct mt7601u_rxwi);

        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
        if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

        trace_mt_rx(dev, rxwi, fce_info);

        skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
        if (!skb)
                return;

        spin_lock(&dev->mac_lock);
        ieee80211_rx(dev->hw, skb);
        spin_unlock(&dev->mac_lock);
}

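/* Return the length of the next segment in an aggregated RX buffer
 * (including the DMA headers), or 0 if the remaining data cannot hold a
 * valid segment.  Segment lengths are expected to be non-zero and 4-byte
 * aligned, hence the WARN_ON_ONCE() sanity checks.
 */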
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
        u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
                sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
        u16 dma_len = get_unaligned_le16(data);

        if (data_len < min_seg_len ||
            WARN_ON_ONCE(!dma_len) ||
            WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
            WARN_ON_ONCE(dma_len & 0x3))
                return 0;

        return MT_DMA_HDRS + dma_len;
}

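/* Walk all segments aggregated in one RX URB.  If the URB carries more than
 * 512 bytes a replacement page is allocated up front so the frames can be
 * handed up as fragments of the current page instead of being copied.
 */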
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
        u32 seg_len, data_len = e->urb->actual_length;
        u8 *data = page_address(e->p);
        struct page *new_p = NULL;
        int cnt = 0;

        if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
                return;

        /* Copy the data only if there is very little of it in the buffer;
         * otherwise allocate a fresh page and pass the frames up as
         * fragments of the old one.
         */
        if (data_len > 512)
                new_p = dev_alloc_pages(MT_RX_ORDER);

        while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
                mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

                data_len -= seg_len;
                data += seg_len;
                cnt++;
        }

        if (cnt > 1)
                trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

        if (new_p) {
                /* we have one extra ref from the allocator */
                __free_pages(e->p, MT_RX_ORDER);

                e->p = new_p;
        }
}

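/* Pop the oldest completed RX entry off the ring, or return NULL if none
 * are pending.  Runs in the RX tasklet and races with the URB completion
 * handler, hence rx_lock.
 */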
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
        struct mt7601u_rx_queue *q = &dev->rx_q;
        struct mt7601u_dma_buf_rx *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        if (!q->pending)
                goto out;

        buf = &q->e[q->start];
        q->pending--;
        q->start = (q->start + 1) % q->entries;
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        return buf;
}

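/* RX URB completion handler (interrupt context).  All it does is mark the
 * entry as pending and schedule the RX tasklet; the actual frame processing
 * happens there.
 */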
static void mt7601u_complete_rx(struct urb *urb)
{
        struct mt7601u_dev *dev = urb->context;
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;

        /* do not schedule the rx tasklet if the urb has been unlinked
         * or the device has been removed
         */
        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                /* fall through */
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->rx_lock, flags);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;

        q->end = (q->end + 1) % q->entries;
        q->pending++;
        tasklet_schedule(&dev->rx_tasklet);
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);
}

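/* Drain the pending RX entries and resubmit each buffer to USB once its
 * segments have been processed.  Entries whose URB completed with an error
 * are skipped and deliberately not resubmitted.
 */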
static void mt7601u_rx_tasklet(unsigned long data)
{
        struct mt7601u_dev *dev = (struct mt7601u_dev *)data;
        struct mt7601u_dma_buf_rx *e;

        while ((e = mt7601u_rx_get_pending_entry(dev))) {
                if (e->urb->status)
                        continue;

                mt7601u_rx_process_entry(dev, e);
                mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
        }
}

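/* TX URB completion handler (interrupt context).  Queues the skb for status
 * reporting in the TX tasklet and wakes the mac80211 queue once the ring
 * drains back below 7/8 of its capacity.
 */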
static void mt7601u_complete_tx(struct urb *urb)
{
        struct mt7601u_tx_queue *q = urb->context;
        struct mt7601u_dev *dev = q->dev;
        struct sk_buff *skb;
        unsigned long flags;

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
                                    urb->status);
                /* fall through */
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->tx_lock, flags);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;

        skb = q->e[q->start].skb;
        q->e[q->start].skb = NULL;
        trace_mt_tx_dma_done(dev, skb);

        __skb_queue_tail(&dev->tx_skb_done, skb);
        tasklet_schedule(&dev->tx_tasklet);

        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

        q->start = (q->start + 1) % q->entries;
        q->used--;
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}

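/* Report TX status for completed skbs outside of interrupt context and make
 * sure a delayed statistics read is scheduled.
 */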
static void mt7601u_tx_tasklet(unsigned long data)
{
        struct mt7601u_dev *dev = (struct mt7601u_dev *)data;
        struct sk_buff_head skbs;
        unsigned long flags;

        __skb_queue_head_init(&skbs);

        spin_lock_irqsave(&dev->tx_lock, flags);

        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));

        skb_queue_splice_init(&dev->tx_skb_done, &skbs);

        spin_unlock_irqrestore(&dev->tx_lock, flags);

        while (!skb_queue_empty(&skbs)) {
                struct sk_buff *skb = __skb_dequeue(&skbs);

                mt7601u_tx_status(dev, skb);
        }
}

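/* Place an skb on the per-endpoint URB ring and submit it to USB.  The
 * corresponding mac80211 queue is stopped as soon as the ring fills up;
 * -ENODEV from usb_submit_urb() is treated as device removal rather than
 * reported as a TX error.
 */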
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
                                 struct sk_buff *skb, u8 ep)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        unsigned int snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
        struct mt7601u_dma_buf_tx *e;
        struct mt7601u_tx_queue *q = &dev->tx_q[ep];
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->tx_lock, flags);

        if (WARN_ON(q->entries <= q->used)) {
                ret = -ENOSPC;
                goto out;
        }

        e = &q->e[q->end];
        e->skb = skb;
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
        if (ret) {
                /* Special-handle ENODEV from TX urb submission because it will
                 * often be the first ENODEV we see after device is removed.
                 */
                if (ret == -ENODEV)
                        set_bit(MT7601U_STATE_REMOVED, &dev->state);
                else
                        dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
                                ret);
                goto out;
        }

        q->end = (q->end + 1) % q->entries;
        q->used++;

        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
        if (ep == 5)
                return MT_QSEL_MGMT;
        return MT_QSEL_EDCA;
}

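/* Main TX entry point: wrap the skb in the per-packet DMA descriptor and
 * submit it on the USB endpoint matching the hardware queue.  When no
 * hardware key is in use (hw_key_idx == 0xff) the WIV flag is added to the
 * descriptor.
 */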
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
                           struct mt76_wcid *wcid, int hw_q)
{
        u8 ep = q2ep(hw_q);
        u32 dma_flags;
        int ret;

        dma_flags = MT_TXD_PKT_INFO_80211;
        if (wcid->hw_key_idx == 0xff)
                dma_flags |= MT_TXD_PKT_INFO_WIV;

        ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
        if (ret)
                return ret;

        ret = mt7601u_dma_submit_tx(dev, skb, ep);
        if (ret) {
                ieee80211_free_txskb(dev->hw, skb);
                return ret;
        }

        return 0;
}

static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++)
                usb_poison_urb(dev->rx_q.e[i].urb);
}

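/* (Re)submit one RX buffer to USB.  Called with GFP_KERNEL from the initial
 * bulk submission and with GFP_ATOMIC from the RX tasklet.
 */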
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        u8 *buf = page_address(e->p);
        unsigned int pipe;
        int ret;

        pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

        usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
                          mt7601u_complete_rx, dev);

        trace_mt_submit_urb(dev, e->urb);
        ret = usb_submit_urb(e->urb, gfp);
        if (ret)
                dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

        return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
        int i, ret;

        for (i = 0; i < dev->rx_q.entries; i++) {
                ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
                if (ret)
                        return ret;
        }

        return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++) {
                /* mt7601u_alloc_rx() may fail mid-way and leave some pages
                 * NULL; __free_pages() does not tolerate a NULL page, so
                 * guard it (usb_free_urb() handles NULL itself).
                 */
                if (dev->rx_q.e[i].p)
                        __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
                usb_free_urb(dev->rx_q.e[i].urb);
        }
}

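/* Allocate the RX URB ring.  On mid-way failure the allocations made so far
 * are not unwound here; the caller is expected to clean up through
 * mt7601u_dma_cleanup().
 */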
static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
        int i;

        memset(&dev->rx_q, 0, sizeof(dev->rx_q));
        dev->rx_q.dev = dev;
        dev->rx_q.entries = N_RX_ENTRIES;

        for (i = 0; i < N_RX_ENTRIES; i++) {
                dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

                if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
                        return -ENOMEM;
        }

        return 0;
}

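/* Poison and free the TX URBs; any skb still attached to an entry is
 * completed back to mac80211 via mt7601u_tx_status().
 */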
static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
        int i;

        for (i = 0; i < q->entries; i++) {
                usb_poison_urb(q->e[i].urb);
                if (q->e[i].skb)
                        mt7601u_tx_status(q->dev, q->e[i].skb);
                usb_free_urb(q->e[i].urb);
        }
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
        int i;

        if (!dev->tx_q)
                return;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
                                  struct mt7601u_tx_queue *q)
{
        int i;

        q->dev = dev;
        q->entries = N_TX_ENTRIES;

        for (i = 0; i < N_TX_ENTRIES; i++) {
                q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!q->e[i].urb)
                        return -ENOMEM;
        }

        return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
        int i;

        dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
                                 sizeof(*dev->tx_q), GFP_KERNEL);
        if (!dev->tx_q)
                return -ENOMEM;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
                        return -ENOMEM;

        return 0;
}

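/* Set up the TX/RX tasklets and URB rings and kick off the initial RX
 * submissions.  Any failure unwinds through mt7601u_dma_cleanup().
 */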
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
        int ret = -ENOMEM;

        tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long)dev);
        tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long)dev);

        ret = mt7601u_alloc_tx(dev);
        if (ret)
                goto err;
        ret = mt7601u_alloc_rx(dev);
        if (ret)
                goto err;

        ret = mt7601u_submit_rx(dev);
        if (ret)
                goto err;

        return 0;
err:
        mt7601u_dma_cleanup(dev);
        return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
        mt7601u_kill_rx(dev);

        tasklet_kill(&dev->rx_tasklet);

        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);

        tasklet_kill(&dev->tx_tasklet);
}