linux/drivers/net/wireless/mediatek/mt7601u/dma.c
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

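/* Like ieee80211_get_hdrlen_from_skb(), but for a raw buffer: returns
 * the 802.11 header length, or 0 if @len is too short to hold it.
 */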
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

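/* Build an skb for one received frame.  In the non-paged case the whole
 * frame is copied into the skb; in the paged case only the 802.11
 * header (plus a few bytes) is copied and the rest of the payload is
 * attached as a fragment of page @p.
 */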
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)
{
	struct sk_buff *skb;
	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)
		goto bad_frame;

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
	if (!hdr_len)
		goto bad_frame;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		memcpy(skb_put(skb, hdr_len), data, hdr_len);

		/* Skip the 2-byte L2 pad inserted after the 802.11 header
		 * to align the payload.
		 */
		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	/* If not doing paged RX, the allocated skb always has enough space */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	memcpy(skb_put(skb, copy), data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;

bad_frame:
	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
			    true_len, hdr_len);
	dev_kfree_skb(skb);
	return NULL;
}

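/* Process a single RX segment: read the FCE info trailer, strip the DMA
 * header and RXWI, and hand the resulting frame to mac80211.
 */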
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* The DMA_INFO field at the beginning of the segment contains only
	 * part of the information; the FCE descriptor at the end of the
	 * segment holds the rest.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	spin_lock(&dev->mac_lock);
	ieee80211_rx(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}

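/* Return the total length (headers included) of the next segment in an
 * RX buffer, or 0 if no complete segment is left.  A single bulk-in URB
 * may carry multiple segments back to back.
 */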
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

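/* Walk all segments of a completed RX URB and feed each frame up the
 * stack.  If the URB carried a lot of data, the page is handed over to
 * paged RX and replaced with a freshly allocated one.
 */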
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* If there is very little data in the buffer just copy it out;
	 * otherwise allocate a replacement page so that RX fragments can
	 * keep referencing the current one.
	 */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	if (new_p) {
		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);

		e->p = new_p;
	}
}

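/* Dequeue the oldest completed RX entry, or return NULL if none are
 * pending.  Takes the RX queue lock internally.
 */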
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

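/* RX URB completion handler: runs in atomic context, so only mark the
 * entry as pending and defer the real work to the RX tasklet.
 */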
static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

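/* Process and resubmit all RX buffers that completed since the last
 * run.  Entries whose URB failed are neither processed nor resubmitted,
 * which quiesces the queue on persistent errors (e.g. device removal).
 */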
static void mt7601u_rx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

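/* TX URB completion handler: queue the skb for status reporting in the
 * TX tasklet and wake the mac80211 queue once enough slots have freed
 * up.
 */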
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

	/* The queue is stopped when it fills up completely; wake it back
	 * up once it has drained to roughly 7/8 of its capacity.
	 */
	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

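/* Report TX status for completed skbs outside the URB completion
 * handler and kick off the delayed statistics read.
 */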
static void mt7601u_tx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
	}
}

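/* Submit an skb on bulk-out endpoint @ep.  Stops the corresponding
 * mac80211 queue when the last free slot is taken.
 */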
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	e->skb = skb;
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

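/* Poison all RX URBs so they complete with an error and are not
 * resubmitted by the RX tasklet.
 */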
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	for (i = 0; i < dev->rx_q.entries; i++) {
		int next = dev->rx_q.end;

		/* usb_poison_urb() waits for the URB to complete and the
		 * completion handler takes rx_lock, so the lock must be
		 * dropped around the call to avoid deadlocking.
		 */
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		usb_poison_urb(dev->rx_q.e[next].urb);
		spin_lock_irqsave(&dev->rx_lock, flags);
	}

	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

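/* (Re)submit a single RX URB pointing at the entry's page. */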
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		/* Entries may be partially allocated if mt7601u_alloc_rx()
		 * failed; usb_free_urb() handles NULL but __free_pages()
		 * does not.
		 */
		if (dev->rx_q.e[i].p)
			__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	WARN_ON(q->used);

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	/* mt7601u_alloc_tx() may have failed before the queue array was
	 * allocated.
	 */
	if (!dev->tx_q)
		return;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

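/* Set up the TX/RX tasklets, allocate both queues and prime the RX
 * queue with URBs.
 */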
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret = -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

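/* Tear down in dependency order: poison the RX URBs so no further
 * completions arrive, stop the RX tasklet, then free the queues.
 */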
void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}