linux/drivers/net/wireless/mediatek/mt7601u/dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

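/* Return the length of the 802.11 header at the start of @data, or 0 if
 * the buffer is too short to hold a complete header.
 */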
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
        const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
        unsigned int hdrlen;

        if (unlikely(len < 10))
                return 0;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (unlikely(hdrlen > len))
                return 0;
        return hdrlen;
}

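/* Build an skb for a single RX segment. Short frames are copied into the
 * skb head in full; with paged RX only the header area is copied and the
 * rest of the frame is attached as a page fragment.
 */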
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
                        void *data, u32 seg_len, u32 truesize, struct page *p)
{
        struct sk_buff *skb;
        u32 true_len, hdr_len = 0, copy, frag;

        skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
        if (!true_len || true_len > seg_len)
                goto bad_frame;

        hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
        if (!hdr_len)
                goto bad_frame;

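        /* With L2 padding the hardware inserts 2 bytes between the 802.11
         * header and the payload; copy the header separately and skip the
         * pad.
         */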
        if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
                skb_put_data(skb, data, hdr_len);

                data += hdr_len + 2;
                true_len -= hdr_len;
                hdr_len = 0;
        }

        /* If not doing paged RX, the allocated skb always has enough space */
        copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
        frag = true_len - copy;

        skb_put_data(skb, data, copy);
        data += copy;

        if (frag) {
                skb_add_rx_frag(skb, 0, p, data - page_address(p),
                                frag, truesize);
                get_page(p);
        }

        return skb;

bad_frame:
        dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
                            true_len, hdr_len);
        dev_kfree_skb(skb);
        return NULL;
}

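/* Process a single RX segment. Per the constants in dma.h the segment
 * layout is roughly:
 *
 *   | DMA header | RXWI | 802.11 frame ... | FCE info |
 *
 * Frames are pushed onto @list via ieee80211_rx_list() so the caller can
 * flush a whole URB with one netif_receive_skb_list() call.
 */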
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                                   u32 seg_len, struct page *p,
                                   struct list_head *list)
{
        struct sk_buff *skb;
        struct mt7601u_rxwi *rxwi;
        u32 fce_info, truesize = seg_len;

        /* The DMA_INFO field at the beginning of the segment contains only
         * some of the information; we need to read the FCE descriptor from
         * the end.
         */
        fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
        seg_len -= MT_FCE_INFO_LEN;

        data += MT_DMA_HDR_LEN;
        seg_len -= MT_DMA_HDR_LEN;

        rxwi = (struct mt7601u_rxwi *) data;
        data += sizeof(struct mt7601u_rxwi);
        seg_len -= sizeof(struct mt7601u_rxwi);

        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
        if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

        trace_mt_rx(dev, rxwi, fce_info);

        skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
        if (!skb)
                return;

        local_bh_disable();
        rcu_read_lock();

        ieee80211_rx_list(dev->hw, NULL, skb, list);

        rcu_read_unlock();
        local_bh_enable();
}

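/* Return the total length (headers included) of the next segment in the
 * buffer, or 0 when no complete segment is left. The first 16 bits of the
 * DMA header hold the payload length.
 */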
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
        u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
                sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
        u16 dma_len = get_unaligned_le16(data);

        if (data_len < min_seg_len ||
            WARN_ON_ONCE(!dma_len) ||
            WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
            WARN_ON_ONCE(dma_len & 0x3))
                return 0;

        return MT_DMA_HDRS + dma_len;
}

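/* Process all segments of a completed RX URB. For large URBs the current
 * page is handed over to the RX path and replaced by a fresh one; small
 * URBs are copied instead.
 */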
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
        u32 seg_len, data_len = e->urb->actual_length;
        u8 *data = page_address(e->p);
        struct page *new_p = NULL;
        LIST_HEAD(list);
        int cnt = 0;

        if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
                return;

        /* Copy if there is very little data in the buffer; otherwise hand the
         * page over to the RX path and swap in a freshly allocated one.
         */
        if (data_len > 512)
                new_p = dev_alloc_pages(MT_RX_ORDER);

        while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
                mt7601u_rx_process_seg(dev, data, seg_len,
                                       new_p ? e->p : NULL, &list);

                data_len -= seg_len;
                data += seg_len;
                cnt++;
        }

        if (cnt > 1)
                trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

        netif_receive_skb_list(&list);

        if (new_p) {
                /* we have one extra ref from the allocator */
                put_page(e->p);
                e->p = new_p;
        }
}

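/* Dequeue the oldest completed RX buffer, or return NULL when nothing is
 * pending. Runs from the RX tasklet.
 */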
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
        struct mt7601u_rx_queue *q = &dev->rx_q;
        struct mt7601u_dma_buf_rx *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        if (!q->pending)
                goto out;

        buf = &q->e[q->start];
        q->pending--;
        q->start = (q->start + 1) % q->entries;
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        return buf;
}

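/* URB completion handler for RX. Runs in atomic context, so it only marks
 * the buffer as pending and defers all processing to the RX tasklet.
 */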
static void mt7601u_complete_rx(struct urb *urb)
{
        struct mt7601u_dev *dev = urb->context;
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;

        /* Do not schedule the rx tasklet if the urb has been unlinked
         * or the device has been removed.
         */
        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
        case -EPROTO:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->rx_lock, flags);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;

        q->end = (q->end + 1) % q->entries;
        q->pending++;
        tasklet_schedule(&dev->rx_tasklet);
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);
}

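/* Bottom half of the RX path: process every pending buffer and resubmit
 * its URB. Buffers whose URB completed with an error are skipped and not
 * resubmitted.
 */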
static void mt7601u_rx_tasklet(struct tasklet_struct *t)
{
        struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
        struct mt7601u_dma_buf_rx *e;

        while ((e = mt7601u_rx_get_pending_entry(dev))) {
                if (e->urb->status)
                        continue;

                mt7601u_rx_process_entry(dev, e);
                mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
        }
}

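/* URB completion handler for TX. Queues the skb for status reporting by
 * the TX tasklet and wakes the mac80211 queue once enough slots free up.
 */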
static void mt7601u_complete_tx(struct urb *urb)
{
        struct mt7601u_tx_queue *q = urb->context;
        struct mt7601u_dev *dev = q->dev;
        struct sk_buff *skb;
        unsigned long flags;

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
        case -EPROTO:
                return;
        default:
                dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->tx_lock, flags);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;

        skb = q->e[q->start].skb;
        q->e[q->start].skb = NULL;
        trace_mt_tx_dma_done(dev, skb);

        __skb_queue_tail(&dev->tx_skb_done, skb);
        tasklet_schedule(&dev->tx_tasklet);

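        /* Wake the queue once it drains down to the 7/8 watermark. */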
        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

        q->start = (q->start + 1) % q->entries;
        q->used--;
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}

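/* Bottom half of the TX path: report TX status for completed skbs and
 * schedule a delayed read of the hardware TX statistics.
 */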
static void mt7601u_tx_tasklet(struct tasklet_struct *t)
{
        struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
        struct sk_buff_head skbs;
        unsigned long flags;

        __skb_queue_head_init(&skbs);

        spin_lock_irqsave(&dev->tx_lock, flags);

        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));

        skb_queue_splice_init(&dev->tx_skb_done, &skbs);

        spin_unlock_irqrestore(&dev->tx_lock, flags);

        while (!skb_queue_empty(&skbs)) {
                struct sk_buff *skb = __skb_dequeue(&skbs);

                mt7601u_tx_status(dev, skb);
        }
}

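/* Submit an skb on the bulk OUT pipe of endpoint @ep. Stops the mac80211
 * queue when the last free slot of the ring is taken.
 */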
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
                                 struct sk_buff *skb, u8 ep)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
        struct mt7601u_dma_buf_tx *e;
        struct mt7601u_tx_queue *q = &dev->tx_q[ep];
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->tx_lock, flags);

        if (WARN_ON(q->entries <= q->used)) {
                ret = -ENOSPC;
                goto out;
        }

        e = &q->e[q->end];
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
        if (ret) {
                /* Special-handle ENODEV from TX urb submission because it
                 * will often be the first ENODEV we see after the device has
                 * been removed.
                 */
                if (ret == -ENODEV)
                        set_bit(MT7601U_STATE_REMOVED, &dev->state);
                else
                        dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
                                ret);
                goto out;
        }

        q->end = (q->end + 1) % q->entries;
        q->used++;
        e->skb = skb;

        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
        if (ep == 5)
                return MT_QSEL_MGMT;
        return MT_QSEL_EDCA;
}

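/* Add the DMA descriptor to an skb and submit it on the endpoint mapped
 * to @hw_q. If the USB submission fails the skb is freed here.
 */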
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
                           struct mt76_wcid *wcid, int hw_q)
{
        u8 ep = q2ep(hw_q);
        u32 dma_flags;
        int ret;

        dma_flags = MT_TXD_PKT_INFO_80211;
        if (wcid->hw_key_idx == 0xff)
                dma_flags |= MT_TXD_PKT_INFO_WIV;

        ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
        if (ret)
                return ret;

        ret = mt7601u_dma_submit_tx(dev, skb, ep);
        if (ret) {
                ieee80211_free_txskb(dev->hw, skb);
                return ret;
        }

        return 0;
}

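/* Kill all RX URBs and block any further submission of them. */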
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++)
                usb_poison_urb(dev->rx_q.e[i].urb);
}

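/* (Re)submit a single RX buffer on the bulk IN packet pipe. */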
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        u8 *buf = page_address(e->p);
        unsigned pipe;
        int ret;

        pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

        usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
                          mt7601u_complete_rx, dev);

        trace_mt_submit_urb(dev, e->urb);
        ret = usb_submit_urb(e->urb, gfp);
        if (ret)
                dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

        return ret;
}

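/* Prime the RX ring by submitting every RX buffer to the device. */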
static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
        int i, ret;

        for (i = 0; i < dev->rx_q.entries; i++) {
                ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
                if (ret)
                        return ret;
        }

        return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++) {
                __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
                usb_free_urb(dev->rx_q.e[i].urb);
        }
}

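/* Allocate the URBs and data pages backing the RX ring. */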
static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
        int i;

        memset(&dev->rx_q, 0, sizeof(dev->rx_q));
        dev->rx_q.dev = dev;
        dev->rx_q.entries = N_RX_ENTRIES;

        for (i = 0; i < N_RX_ENTRIES; i++) {
                dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

                if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
                        return -ENOMEM;
        }

        return 0;
}

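/* Kill the queue's URBs, report TX status for any skb still in flight and
 * free the URBs.
 */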
static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
        int i;

        for (i = 0; i < q->entries; i++) {
                usb_poison_urb(q->e[i].urb);
                if (q->e[i].skb)
                        mt7601u_tx_status(q->dev, q->e[i].skb);
                usb_free_urb(q->e[i].urb);
        }
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
        int i;

        if (!dev->tx_q)
                return;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
                                  struct mt7601u_tx_queue *q)
{
        int i;

        q->dev = dev;
        q->entries = N_TX_ENTRIES;

        for (i = 0; i < N_TX_ENTRIES; i++) {
                q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!q->e[i].urb)
                        return -ENOMEM;
        }

        return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
        int i;

        dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
                                 sizeof(*dev->tx_q), GFP_KERNEL);
        if (!dev->tx_q)
                return -ENOMEM;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
                        return -ENOMEM;

        return 0;
}

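/* Set up the TX/RX tasklets and rings and start receiving. On error all
 * partial allocations are undone through mt7601u_dma_cleanup().
 */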
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
        int ret = -ENOMEM;

        tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
        tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);

        ret = mt7601u_alloc_tx(dev);
        if (ret)
                goto err;
        ret = mt7601u_alloc_rx(dev);
        if (ret)
                goto err;

        ret = mt7601u_submit_rx(dev);
        if (ret)
                goto err;

        return 0;
err:
        mt7601u_dma_cleanup(dev);
        return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
        mt7601u_kill_rx(dev);

        tasklet_kill(&dev->rx_tasklet);

        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);

        tasklet_kill(&dev->tx_tasklet);
}