linux/drivers/net/wireless/mediatek/mt76/usb.c
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY   10
#define MT_VEND_REQ_TOUT_MS     300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                                  u8 req_type, u16 val, u16 offset,
                                  void *buf, size_t len)
{
        struct usb_device *udev = to_usb_device(dev->dev);
        unsigned int pipe;
        int i, ret;

        pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
                                       : usb_sndctrlpipe(udev, 0);
        for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        return -EIO;

                ret = usb_control_msg(udev, pipe, req, req_type, val,
                                      offset, buf, len, MT_VEND_REQ_TOUT_MS);
                if (ret == -ENODEV)
                        set_bit(MT76_REMOVED, &dev->state);
                if (ret >= 0 || ret == -ENODEV)
                        return ret;
                usleep_range(5000, 10000);
        }

        dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
                req, offset, ret);
        return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len)
{
        int ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_vendor_request(dev, req, req_type,
                                     val, offset, buf, len);
        trace_usb_reg_wr(dev, offset, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
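
/*
 * Usage sketch (illustrative only, not taken from any chip driver): a
 * driver built on mt76 could read one little-endian word over the control
 * endpoint like this; the offset 0x0000 below is a placeholder:
 *
 *	__le32 data;
 *	int ret;
 *
 *	ret = mt76u_vendor_request(dev, MT_VEND_MULTI_READ,
 *				   USB_DIR_IN | USB_TYPE_VENDOR,
 *				   0, 0x0000, &data, sizeof(data));
 *	if (ret == sizeof(data))
 *		val = le32_to_cpu(data);
 */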

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        struct mt76_usb *usb = &dev->usb;
        u32 data = ~0;
        u16 offset;
        int ret;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_EEPROM:
                req = MT_VEND_READ_EEPROM;
                break;
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_READ_CFG;
                break;
        default:
                req = MT_VEND_MULTI_READ;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        ret = __mt76u_vendor_request(dev, req,
                                     USB_DIR_IN | USB_TYPE_VENDOR,
                                     0, offset, usb->data, sizeof(__le32));
        if (ret == sizeof(__le32))
                data = get_unaligned_le32(usb->data);
        trace_usb_reg_rr(dev, addr, data);

        return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        u32 ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_rr(dev, addr);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        struct mt76_usb *usb = &dev->usb;
        u16 offset;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_WRITE_CFG;
                break;
        default:
                req = MT_VEND_MULTI_WRITE;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        put_unaligned_le32(val, usb->data);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR, 0,
                               offset, usb->data, sizeof(__le32));
        trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
                     u32 mask, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        val |= __mt76u_rr(dev, addr) & ~mask;
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return val;
}
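
/*
 * Worked example (assumed values, for illustration): with a register
 * holding 0xffff0000, mt76u_rmw(dev, addr, 0x000000f0, 0x00000050) reads
 * 0xffff0000, clears the masked bits and ORs in the new value, writing
 * back 0xffff0050. Both accesses happen under usb_ctrl_mtx, so the
 * read-modify-write is atomic with respect to other register ops on this
 * bus, though not with respect to the device firmware itself.
 */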

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
                       const void *data, int len)
{
        struct mt76_usb *usb = &dev->usb;
        const u32 *val = data;
        int i, ret;

        mutex_lock(&usb->usb_ctrl_mtx);
        for (i = 0; i < (len / 4); i++) {
                put_unaligned_le32(val[i], usb->data);
                ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
                                             USB_DIR_OUT | USB_TYPE_VENDOR,
                                             0, offset + i * 4, usb->data,
                                             sizeof(__le32));
                if (ret < 0)
                        break;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val & 0xffff, offset, NULL, 0);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val >> 16, offset + 2, NULL, 0);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);
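
/*
 * Worked example (values assumed for illustration): mt76u_single_wr(dev,
 * req, 0x0100, 0xaabbccdd) issues two zero-length control transfers, one
 * with wValue 0xccdd at offset 0x0100 and one with wValue 0xaabb at offset
 * 0x0102, i.e. the 32-bit value travels in the setup packet's wValue field
 * rather than in a data stage.
 */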

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
                const struct mt76_reg_pair *data, int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                __mt76u_wr(dev, base + data->reg, data->value);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
            const struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
        else
                return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
                int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                data->value = __mt76u_rr(dev, base + data->reg);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
            struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
        else
                return mt76u_req_rd_rp(dev, base, data, n);
}

static bool mt76u_check_sg(struct mt76_dev *dev)
{
        struct usb_device *udev = to_usb_device(dev->dev);

        return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
                (udev->bus->no_sg_constraint ||
                 udev->speed == USB_SPEED_WIRELESS));
}
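
/*
 * Scatter-gather is only used when the host controller advertises SG
 * support (sg_tablesize > 0) and can take arbitrarily sized SG entries
 * (no_sg_constraint), e.g. xHCI. Loading the module with disable_usb_sg=1
 * forces the single linear-buffer path, which is a useful fallback when
 * debugging SG-related issues.
 */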

static int
mt76u_set_endpoints(struct usb_interface *intf,
                    struct mt76_usb *usb)
{
        struct usb_host_interface *intf_desc = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep_desc;
        int i, in_ep = 0, out_ep = 0;

        for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
                ep_desc = &intf_desc->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep_desc) &&
                    in_ep < __MT_EP_IN_MAX) {
                        usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
                        usb->in_max_packet = usb_endpoint_maxp(ep_desc);
                        in_ep++;
                } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                           out_ep < __MT_EP_OUT_MAX) {
                        usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
                        usb->out_max_packet = usb_endpoint_maxp(ep_desc);
                        out_ep++;
                }
        }

        if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
                return -EINVAL;
        return 0;
}

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
                 int nsgs, gfp_t gfp)
{
        int i;

        for (i = 0; i < nsgs; i++) {
                struct page *page;
                void *data;
                int offset;

                data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
                if (!data)
                        break;

                page = virt_to_head_page(data);
                offset = data - page_address(page);
                sg_set_page(&urb->sg[i], page, q->buf_size, offset);
        }

        if (i < nsgs) {
                int j;

                for (j = nsgs; j < urb->num_sgs; j++)
                        skb_free_frag(sg_virt(&urb->sg[j]));
                urb->num_sgs = i;
        }

        urb->num_sgs = max_t(int, i, urb->num_sgs);
        urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
        sg_init_marker(urb->sg, urb->num_sgs);

        return i ? : -ENOMEM;
}
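
/*
 * Note on the cleanup path above: if fewer than nsgs fragments could be
 * allocated, fragments left over from the previous fill beyond the
 * requested range are released and num_sgs is shrunk to what was actually
 * filled, so the urb never advertises more buffer space than it owns. The
 * `i ? : -ENOMEM` return is the GNU shorthand for `i ? i : -ENOMEM`.
 */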

static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

        if (dev->usb.sg_en) {
                return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
        } else {
                urb->transfer_buffer_length = q->buf_size;
                urb->transfer_buffer = page_frag_alloc(&q->rx_page,
                                                       q->buf_size, gfp);
                return urb->transfer_buffer ? 0 : -ENOMEM;
        }
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
        unsigned int size = sizeof(struct urb);

        if (dev->usb.sg_en)
                size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);

        e->urb = kzalloc(size, GFP_KERNEL);
        if (!e->urb)
                return -ENOMEM;

        usb_init_urb(e->urb);

        if (dev->usb.sg_en)
                e->urb->sg = (struct scatterlist *)(e->urb + 1);

        return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
        int err;

        err = mt76u_urb_alloc(dev, e);
        if (err)
                return err;

        return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
        int i;

        for (i = 0; i < urb->num_sgs; i++)
                skb_free_frag(sg_virt(&urb->sg[i]));

        if (urb->transfer_buffer)
                skb_free_frag(urb->transfer_buffer);

        usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
                    struct urb *urb, usb_complete_t complete_fn,
                    void *context)
{
        struct usb_device *udev = to_usb_device(dev->dev);
        unsigned int pipe;

        if (dir == USB_DIR_IN)
                pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
        else
                pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

        urb->dev = udev;
        urb->pipe = pipe;
        urb->complete = complete_fn;
        urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct urb *urb = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (q->queued > 0) {
                urb = q->entry[q->head].urb;
                q->head = (q->head + 1) % q->ndesc;
                q->queued--;
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return urb;
}

static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
        u16 dma_len, min_len;

        dma_len = get_unaligned_le16(data);
        min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
                  MT_FCE_INFO_LEN;

        if (data_len < min_len || !dma_len ||
            dma_len + MT_DMA_HDR_LEN > data_len ||
            (dma_len & 0x3))
                return -EINVAL;
        return dma_len;
}
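
/*
 * The first two bytes of every rx buffer carry the little-endian DMA
 * length of the frame that follows the DMA header. The checks above
 * reject a buffer shorter than the minimum header set, a length of zero,
 * a length that is not a multiple of four, and a length that would run
 * past the data actually received, returning -EINVAL so the caller drops
 * the buffer instead of building an skb from garbage.
 */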

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
        int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
        int len, nsgs = 1;
        struct sk_buff *skb;

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
                return 0;

        len = mt76u_get_rx_entry_len(data, urb->actual_length);
        if (len < 0)
                return 0;

        data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
        if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
                dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
                return 0;
        }

        skb = build_skb(data, q->buf_size);
        if (!skb)
                return 0;

        skb_reserve(skb, MT_DMA_HDR_LEN);
        __skb_put(skb, data_len);
        len -= data_len;

        while (len > 0 && nsgs < urb->num_sgs) {
                data_len = min_t(int, len, urb->sg[nsgs].length);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                sg_page(&urb->sg[nsgs]),
                                urb->sg[nsgs].offset,
                                data_len, q->buf_size);
                len -= data_len;
                nsgs++;
        }
        dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

        return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
        struct mt76_dev *dev = urb->context;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;

        trace_rx_urb(dev, urb);

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                /* fall through */
        case 0:
                break;
        }

        spin_lock_irqsave(&q->lock, flags);
        if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
                goto out;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued++;
        tasklet_schedule(&dev->usb.rx_tasklet);
out:
        spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
        mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
                            mt76u_complete_rx, dev);
        trace_submit_urb(dev, urb);

        return usb_submit_urb(urb, GFP_ATOMIC);
}

static void mt76u_rx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct urb *urb;
        int err, count;

        rcu_read_lock();

        while (true) {
                urb = mt76u_get_next_rx_entry(dev);
                if (!urb)
                        break;

                count = mt76u_process_rx_entry(dev, urb);
                if (count > 0) {
                        err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
                        if (err < 0)
                                break;
                }
                mt76u_submit_rx_buf(dev, urb);
        }
        mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

        rcu_read_unlock();
}
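
/*
 * Rx flow in short: the completion handler only advances the ring and
 * schedules this tasklet; all skb construction happens here in softirq
 * context. mt76u_process_rx_entry() hands the fragment pages it consumed
 * over to the skb (zero copy), so exactly that many fragments are
 * re-allocated via mt76u_refill_rx() before the urb is resubmitted.
 */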

static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;
        int i, err = 0;

        spin_lock_irqsave(&q->lock, flags);
        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
                if (err < 0)
                        break;
        }
        q->head = q->tail = 0;
        q->queued = 0;
        spin_unlock_irqrestore(&q->lock, flags);

        return err;
}

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
        struct mt76_usb *usb = &dev->usb;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i, err;

        usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
        if (!usb->mcu.data)
                return -ENOMEM;

        spin_lock_init(&q->lock);
        q->entry = devm_kcalloc(dev->dev,
                                MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
        q->ndesc = MT_NUM_RX_ENTRIES;
        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
                if (err < 0)
                        return err;
        }

        return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct page *page;
        int i;

        for (i = 0; i < q->ndesc; i++)
                mt76u_urb_free(q->entry[i].urb);

        if (!q->rx_page.va)
                return;

        page = virt_to_page(q->rx_page.va);
        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i;

        for (i = 0; i < q->ndesc; i++)
                usb_poison_urb(q->entry[i].urb);

        tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i;

        for (i = 0; i < q->ndesc; i++)
                usb_unpoison_urb(q->entry[i].urb);

        return mt76u_submit_rx_buffers(dev);
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

static void mt76u_tx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct mt76_queue_entry entry;
        struct mt76_sw_queue *sq;
        struct mt76_queue *q;
        bool wake;
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                u32 n_dequeued = 0, n_sw_dequeued = 0;

                sq = &dev->q_tx[i];
                q = sq->q;

                while (q->queued > n_dequeued) {
                        if (!q->entry[q->head].done)
                                break;

                        if (q->entry[q->head].schedule) {
                                q->entry[q->head].schedule = false;
                                n_sw_dequeued++;
                        }

                        entry = q->entry[q->head];
                        q->entry[q->head].done = false;
                        q->head = (q->head + 1) % q->ndesc;
                        n_dequeued++;

                        dev->drv->tx_complete_skb(dev, i, &entry);
                }

                spin_lock_bh(&q->lock);

                sq->swq_queued -= n_sw_dequeued;
                q->queued -= n_dequeued;

                wake = q->stopped && q->queued < q->ndesc - 8;
                if (wake)
                        q->stopped = false;

                if (!q->queued)
                        wake_up(&dev->tx_wait);

                spin_unlock_bh(&q->lock);

                mt76_txq_schedule(dev, i);

                if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
                        ieee80211_queue_delayed_work(dev->hw,
                                                     &dev->usb.stat_work,
                                                     msecs_to_jiffies(10));

                if (wake)
                        ieee80211_wake_queue(dev->hw, i);
        }
}
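
/*
 * Per access category the tasklet first reaps completed entries without
 * the queue lock (the completion handler only sets ->done, and this
 * tasklet is the sole consumer of ->head), then takes the lock just long
 * enough to fold the counters back in. A queue stopped by mac80211 is
 * only woken once more than eight slots are free again, which adds
 * hysteresis and avoids a wake/stop ping-pong at ring-full.
 */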

static void mt76u_tx_status_data(struct work_struct *work)
{
        struct mt76_usb *usb;
        struct mt76_dev *dev;
        u8 update = 1;
        u16 count = 0;

        usb = container_of(work, struct mt76_usb, stat_work.work);
        dev = container_of(usb, struct mt76_dev, usb);

        while (true) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        break;

                if (!dev->drv->tx_status_data(dev, &update))
                        break;
                count++;
        }

        if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
                ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
                                             msecs_to_jiffies(10));
        else
                clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
        struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
        struct mt76_queue_entry *e = urb->context;

        if (mt76u_urb_error(urb))
                dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
        e->done = true;

        tasklet_schedule(&dev->tx_tasklet);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
                       struct urb *urb)
{
        urb->transfer_buffer_length = skb->len;

        if (!dev->usb.sg_en) {
                urb->transfer_buffer = skb->data;
                return 0;
        } else {
                sg_init_table(urb->sg, MT_SG_MAX_SIZE);
                urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
                if (urb->num_sgs == 0)
                        return -ENOMEM;
                return urb->num_sgs;
        }
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
                   struct sk_buff *skb, struct mt76_wcid *wcid,
                   struct ieee80211_sta *sta)
{
        struct mt76_queue *q = dev->q_tx[qid].q;
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
        u16 idx = q->tail;
        int err;

        if (q->queued == q->ndesc)
                return -ENOSPC;

        skb->prev = skb->next = NULL;
        err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
        if (err < 0)
                return err;

        err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
        if (err < 0)
                return err;

        mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
                            q->entry[idx].urb, mt76u_complete_tx,
                            &q->entry[idx]);

        q->tail = (q->tail + 1) % q->ndesc;
        q->entry[idx].skb = tx_info.skb;
        q->queued++;

        return idx;
}
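
/*
 * Note the three ring indices: ->tail marks where the next frame is
 * queued, ->first points at the oldest prepared-but-unsubmitted urb, and
 * ->head is where completions are reaped. mt76u_tx_queue_skb() only
 * prepares the urb and advances ->tail; the ->kick hook below submits
 * everything between ->first and ->tail to the USB core.
 */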

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct urb *urb;
        int err;

        while (q->first != q->tail) {
                urb = q->entry[q->first].urb;

                trace_submit_urb(dev, urb);
                err = usb_submit_urb(urb, GFP_ATOMIC);
                if (err < 0) {
                        if (err == -ENODEV)
                                set_bit(MT76_REMOVED, &dev->state);
                        else
                                dev_err(dev->dev, "tx urb submit failed:%d\n",
                                        err);
                        break;
                }
                q->first = (q->first + 1) % q->ndesc;
        }
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j, err;

        for (i = 0; i <= MT_TXQ_PSD; i++) {
                INIT_LIST_HEAD(&dev->q_tx[i].swq);

                if (i >= IEEE80211_NUM_ACS) {
                        dev->q_tx[i].q = dev->q_tx[0].q;
                        continue;
                }

                q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
                if (!q)
                        return -ENOMEM;

                spin_lock_init(&q->lock);
                q->hw_idx = mt76_ac_to_hwq(i);
                dev->q_tx[i].q = q;

                q->entry = devm_kcalloc(dev->dev,
                                        MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                                        GFP_KERNEL);
                if (!q->entry)
                        return -ENOMEM;

                q->ndesc = MT_NUM_TX_ENTRIES;
                for (j = 0; j < q->ndesc; j++) {
                        err = mt76u_urb_alloc(dev, &q->entry[j]);
                        if (err < 0)
                                return err;
                }
        }
        return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = dev->q_tx[i].q;
                for (j = 0; j < q->ndesc; j++)
                        usb_free_urb(q->entry[j].urb);
        }
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
        struct mt76_queue_entry entry;
        struct mt76_queue *q;
        int i, j, ret;

        ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ/5);
        if (!ret) {
                dev_err(dev->dev, "timed out waiting for pending tx\n");

                for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                        q = dev->q_tx[i].q;
                        for (j = 0; j < q->ndesc; j++)
                                usb_kill_urb(q->entry[j].urb);
                }

                tasklet_kill(&dev->tx_tasklet);

                /* On device removal we might still have queued skb's, but
                 * mt76u_tx_kick() will fail to submit the urbs; clean up
                 * those skb's manually.
                 */
                for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                        q = dev->q_tx[i].q;

                        /* Make sure we are in sync with the killed tasklet. */
                        spin_lock_bh(&q->lock);
                        while (q->queued) {
                                entry = q->entry[q->head];
                                q->head = (q->head + 1) % q->ndesc;
                                q->queued--;

                                dev->drv->tx_complete_skb(dev, i, &entry);
                        }
                        spin_unlock_bh(&q->lock);
                }
        }

        cancel_delayed_work_sync(&dev->usb.stat_work);
        clear_bit(MT76_READING_STATS, &dev->state);

        mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
        mt76u_stop_rx(dev);
        mt76u_stop_tx(dev);

        mt76u_free_rx(dev);
        mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
        int err;

        err = mt76u_alloc_rx(dev);
        if (err < 0)
                return err;

        return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
        .tx_queue_skb = mt76u_tx_queue_skb,
        .kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
               struct usb_interface *intf)
{
        static const struct mt76_bus_ops mt76u_ops = {
                .rr = mt76u_rr,
                .wr = mt76u_wr,
                .rmw = mt76u_rmw,
                .copy = mt76u_copy,
                .wr_rp = mt76u_wr_rp,
                .rd_rp = mt76u_rd_rp,
                .type = MT76_BUS_USB,
        };
        struct mt76_usb *usb = &dev->usb;

        tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
        tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
        INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
        skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

        mutex_init(&usb->mcu.mutex);

        mutex_init(&usb->usb_ctrl_mtx);
        dev->bus = &mt76u_ops;
        dev->queue_ops = &usb_queue_ops;

        usb->sg_en = mt76u_check_sg(dev);

        return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);
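
/*
 * Probe-time usage sketch (illustrative; error unwinding and the chip
 * driver's callback table are elided, and the allocation step differs
 * between mt76 chip drivers):
 *
 *	static int xxxu_probe(struct usb_interface *intf,
 *			      const struct usb_device_id *id)
 *	{
 *		struct mt76_dev *dev = ...;	// chip-specific allocation
 *		int err;
 *
 *		err = mt76u_init(dev, intf);	// register bus/queue ops
 *		if (err)
 *			return err;
 *		return mt76u_alloc_queues(dev);	// tx/rx rings + rx urbs
 *	}
 */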

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");