/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE     256
#define MT_MCU_RING_SIZE    32
#define MT_RX_BUF_SIZE      2048

struct mt76_dev;
struct mt76_wcid;

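/*
 * Bus abstraction: register read/write/read-modify-write plus bulk copy,
 * implemented per transport (MMIO or USB) so that common code stays
 * bus-agnostic.
 */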
struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};

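/*
 * TX queue IDs: the first four entries mirror mac80211's access
 * categories; the remaining queues carry powersave-delivery frames,
 * MCU commands, beacons and content-after-beacon (multicast) traffic.
 */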
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76u_buf {
	struct mt76_dev *dev;
	struct urb *urb;
	size_t len;
	bool done;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct mt76u_buf ubuf;
	};
	bool schedule;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

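/*
 * One hardware DMA ring. The driver advances cpu_idx as it queues
 * descriptors and the hardware advances dma_idx as it consumes them;
 * swq lists software TX queues pending scheduling onto this ring.
 */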
struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
};

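/*
 * Queue backend operations, again implemented per transport: DMA rings
 * for MMIO devices, URB-based queues for USB.
 */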
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

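/*
 * Per-station context, indexed by the hardware WCID (wireless client ID)
 * table slot. Tracks RX aggregation state, PN replay checking and the
 * TX rate currently programmed into the hardware.
 */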
struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	unsigned long flags;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};

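/*
 * RX reorder buffer for one BlockAck session: out-of-order A-MPDU
 * subframes are held in reorder_buf and released in sequence-number
 * order, with reorder_work flushing stale entries.
 */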
struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_OFFCHANNEL,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_MORE_STATS,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

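/*
 * Hooks implemented by the chip-specific driver (e.g. mt76x2); common
 * code calls into these for TXWI setup, TX completion, RX dispatch and
 * powersave transitions.
 */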
struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

#define MT_SG_MAX_SIZE		8
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 data[32];

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	struct delayed_work stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u16 out_max_packet;
	u8 in_ep[__MT_EP_IN_MAX];
	u16 in_max_packet;

	struct mt76u_mcu {
		struct mutex mutex;
		struct completion cmpl;
		struct mt76u_buf res;
		u32 msg_seq;
	} mcu;
};

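/*
 * Core device state shared by all mt76 drivers. Chip drivers embed this
 * as a member named "mt76" in their private struct, which is what the
 * accessor macros below rely on.
 */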
struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;
	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	void __iomem *regs;
	struct device *dev;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	wait_queue_head_t tx_wait;

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	u8 antenna_mask;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	struct mt76_usb usb;
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u8 iv[6];

	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

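/*
 * Register access helpers: "dev" here is the chip driver's private
 * struct with the embedded "struct mt76_dev mt76"; all accesses are
 * dispatched through the bus ops.
 */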
#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

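/*
 * Example usage (MT_EXAMPLE_REG and its field mask are hypothetical):
 *
 *	mt76_set(dev, MT_EXAMPLE_REG, BIT(0));
 *	mt76_rmw_field(dev, MT_EXAMPLE_REG, GENMASK(7, 4), 0xa);
 *	val = mt76_get_field(dev, MT_EXAMPLE_REG, GENMASK(7, 4));
 */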
#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...)	(dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

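/*
 * Look up the survey counters (channel active/busy time) for a channel;
 * the index is the channel's position within its band's channel array.
 */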
static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

struct mt76_dev *mt76_alloc_device(unsigned int size,
				   const struct ieee80211_ops *ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

/* increment with wrap-around; size must be a power of two */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around; size must be a power of two */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

/* Hardware uses mirrored order of queues with Q3
 * having the highest priority
 */
static inline u8 q2hwq(u8 q)
{
	return q ^ 0x3;
}

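/*
 * mt76_txq and mt76_wcid live inside mac80211's drv_priv areas of
 * struct ieee80211_txq and struct ieee80211_sta respectively, so the
 * enclosing mac80211 object can be recovered with container_of().
 */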
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

/* usb */
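/*
 * Treat an URB as failed only for real errors: -ECONNRESET, -ESHUTDOWN
 * and -ENOENT indicate deliberate cancellation or device teardown.
 */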
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

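/*
 * Use scatter-gather URBs only when the host controller supports them
 * and either imposes no SG element size constraint or the link runs at
 * wireless USB speed, where that constraint does not apply.
 */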
static inline bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);

	return (udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp);
void mt76u_buf_free(struct mt76u_buf *buf);
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);

int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
			   int data_len, u32 max_payload, u32 offset);
void mt76u_mcu_complete_urb(struct urb *urb);
struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
		       int cmd, bool wait_resp);
void mt76u_mcu_fw_reset(struct mt76_dev *dev);
int mt76u_mcu_init_rx(struct mt76_dev *dev);

#endif