linux/drivers/net/wireless/mediatek/mt76/mt76.h
/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE        32
#define MT_RX_BUF_SIZE          2048
#define MT_SKB_HEAD_LEN         128

#define MT_MAX_NON_AQL_PKT      16
#define MT_TXQ_FREE_THR         32

#define MT76_TOKEN_FREE_THR     64

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;

struct mt76_reg_pair {
        u32 reg;
        u32 value;
};

enum mt76_bus_type {
        MT76_BUS_MMIO,
        MT76_BUS_USB,
        MT76_BUS_SDIO,
};

struct mt76_bus_ops {
        u32 (*rr)(struct mt76_dev *dev, u32 offset);
        void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
        u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
        void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
                           int len);
        void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
                          int len);
        int (*wr_rp)(struct mt76_dev *dev, u32 base,
                     const struct mt76_reg_pair *rp, int len);
        int (*rd_rp)(struct mt76_dev *dev, u32 base,
                     struct mt76_reg_pair *rp, int len);
        enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)

enum mt76_txq_id {
        MT_TXQ_VO = IEEE80211_AC_VO,
        MT_TXQ_VI = IEEE80211_AC_VI,
        MT_TXQ_BE = IEEE80211_AC_BE,
        MT_TXQ_BK = IEEE80211_AC_BK,
        MT_TXQ_PSD,
        MT_TXQ_BEACON,
        MT_TXQ_CAB,
        __MT_TXQ_MAX
};

enum mt76_mcuq_id {
        MT_MCUQ_WM,
        MT_MCUQ_WA,
        MT_MCUQ_FWDL,
        __MT_MCUQ_MAX
};

enum mt76_rxq_id {
        MT_RXQ_MAIN,
        MT_RXQ_MCU,
        MT_RXQ_MCU_WA,
        MT_RXQ_EXT,
        MT_RXQ_EXT_WA,
        __MT_RXQ_MAX
};

enum mt76_cipher_type {
        MT_CIPHER_NONE,
        MT_CIPHER_WEP40,
        MT_CIPHER_TKIP,
        MT_CIPHER_TKIP_NO_MIC,
        MT_CIPHER_AES_CCMP,
        MT_CIPHER_WEP104,
        MT_CIPHER_BIP_CMAC_128,
        MT_CIPHER_WEP128,
        MT_CIPHER_WAPI,
        MT_CIPHER_CCMP_CCX,
        MT_CIPHER_CCMP_256,
        MT_CIPHER_GCMP,
        MT_CIPHER_GCMP_256,
};

struct mt76_queue_buf {
        dma_addr_t addr;
        u16 len;
        bool skip_unmap;
};

struct mt76_tx_info {
        struct mt76_queue_buf buf[32];
        struct sk_buff *skb;
        int nbuf;
        u32 info;
};

struct mt76_queue_entry {
        union {
                void *buf;
                struct sk_buff *skb;
        };
        union {
                struct mt76_txwi_cache *txwi;
                struct urb *urb;
                int buf_sz;
        };
        u32 dma_addr[2];
        u16 dma_len[2];
        u16 wcid;
        bool skip_buf0:1;
        bool skip_buf1:1;
        bool done:1;
};

struct mt76_queue_regs {
        u32 desc_base;
        u32 ring_size;
        u32 cpu_idx;
        u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
        struct mt76_queue_regs __iomem *regs;

        spinlock_t lock;
        spinlock_t cleanup_lock;
        struct mt76_queue_entry *entry;
        struct mt76_desc *desc;

        u16 first;
        u16 head;
        u16 tail;
        int ndesc;
        int queued;
        int buf_size;
        bool stopped;
        bool blocked;

        u8 buf_offset;
        u8 hw_idx;
        u8 qid;

        dma_addr_t desc_dma;
        struct sk_buff *rx_head;
        struct page_frag_cache rx_page;
};

struct mt76_mcu_ops {
        u32 headroom;
        u32 tailroom;

        int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
                            int len, bool wait_resp);
        int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
                                int cmd, int *seq);
        int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
                                  struct sk_buff *skb, int seq);
        u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
        void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
        int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
                         const struct mt76_reg_pair *rp, int len);
        int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
                         struct mt76_reg_pair *rp, int len);
        int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
        int (*init)(struct mt76_dev *dev,
                    int (*poll)(struct napi_struct *napi, int budget));

        int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
                     int idx, int n_desc, int bufsize,
                     u32 ring_base);

        int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
                            struct sk_buff *skb, struct mt76_wcid *wcid,
                            struct ieee80211_sta *sta);

        int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
                                struct sk_buff *skb, u32 tx_info);

        void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
                         int *len, u32 *info, bool *more);

        void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

        void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
                           bool flush);

        void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

        void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

        void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
        MT_WCID_FLAG_CHECK_PS,
        MT_WCID_FLAG_PS,
        MT_WCID_FLAG_4ADDR,
        MT_WCID_FLAG_HDR_TRANS,
};

#define MT76_N_WCIDS 288

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY          BIT(3)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE            GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS             GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ       GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET             BIT(31)
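
/*
 * Illustrative sketch (not part of the API): a fixed-rate override is
 * packed into mt76_wcid::tx_info with the bitfield helpers from
 * <linux/bitfield.h>, where "rate" and "nss" stand for chip-specific
 * hardware values:
 *
 *      wcid->tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rate) |
 *                      FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
 *                      MT_WCID_TX_INFO_SET;
 */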

struct mt76_wcid {
        struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

        atomic_t non_aql_packets;
        unsigned long flags;

        struct ewma_signal rssi;
        int inactive_count;

        u16 idx;
        u8 hw_key_idx;
        u8 hw_key_idx2;

        u8 sta:1;
        u8 ext_phy:1;
        u8 amsdu:1;

        u8 rx_check_pn;
        u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
        u16 cipher;

        u32 tx_info;
        bool sw_iv;

        u8 packet_id;
};

struct mt76_txq {
        struct mt76_wcid *wcid;

        u16 agg_ssn;
        bool send_bar;
        bool aggr;
};

struct mt76_txwi_cache {
        struct list_head list;
        dma_addr_t dma_addr;

        struct sk_buff *skb;
};

struct mt76_rx_tid {
        struct rcu_head rcu_head;

        struct mt76_dev *dev;

        spinlock_t lock;
        struct delayed_work reorder_work;

        u16 head;
        u16 size;
        u16 nframes;

        u8 num;

        u8 started:1, stopped:1, timer_pending:1;

        struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE               BIT(0)
#define MT_TX_CB_TXS_DONE               BIT(1)
#define MT_TX_CB_TXS_FAILED             BIT(2)

#define MT_PACKET_ID_MASK               GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK             0
#define MT_PACKET_ID_NO_SKB             1
#define MT_PACKET_ID_FIRST              2
#define MT_PACKET_ID_HAS_RATE           BIT(7)
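
/*
 * Packet IDs 0 and 1 are reserved: MT_PACKET_ID_NO_ACK marks frames
 * that expect no TX status and MT_PACKET_ID_NO_SKB marks status
 * entries with no skb attached. IDs from MT_PACKET_ID_FIRST upwards
 * track skbs on the status list, while MT_PACKET_ID_HAS_RATE flags IDs
 * that encode only rate information rather than a tracked skb (see
 * mt76_is_skb_pktid() below).
 */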

#define MT_TX_STATUS_SKB_TIMEOUT        HZ

struct mt76_tx_cb {
        unsigned long jiffies;
        u16 wcid;
        u8 pktid;
        u8 flags;
};

enum {
        MT76_STATE_INITIALIZED,
        MT76_STATE_RUNNING,
        MT76_STATE_MCU_RUNNING,
        MT76_SCANNING,
        MT76_HW_SCANNING,
        MT76_HW_SCHED_SCANNING,
        MT76_RESTART,
        MT76_RESET,
        MT76_MCU_RESET,
        MT76_REMOVED,
        MT76_READING_STATS,
        MT76_STATE_POWER_OFF,
        MT76_STATE_SUSPEND,
        MT76_STATE_ROC,
        MT76_STATE_PM,
};

struct mt76_hw_cap {
        bool has_2ghz;
        bool has_5ghz;
        bool has_6ghz;
};

#define MT_DRV_TXWI_NO_FREE             BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS         BIT(1)
#define MT_DRV_SW_RX_AIRTIME            BIT(2)
#define MT_DRV_RX_DMA_HDR               BIT(3)
#define MT_DRV_HW_MGMT_TXQ              BIT(4)
#define MT_DRV_AMSDU_OFFLOAD            BIT(5)

struct mt76_driver_ops {
        u32 drv_flags;
        u32 survey_flags;
        u16 txwi_size;
        u16 token_size;
        u8 mcs_rates;

        void (*update_survey)(struct mt76_phy *phy);

        int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
                              enum mt76_txq_id qid, struct mt76_wcid *wcid,
                              struct ieee80211_sta *sta,
                              struct mt76_tx_info *tx_info);

        void (*tx_complete_skb)(struct mt76_dev *dev,
                                struct mt76_queue_entry *e);

        bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

        void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
                       struct sk_buff *skb);

        void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

        void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
                       bool ps);

        int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);

        void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
                          struct ieee80211_sta *sta);

        void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta);
};

struct mt76_channel_state {
        u64 cc_active;
        u64 cc_busy;
        u64 cc_rx;
        u64 cc_bss_rx;
        u64 cc_tx;

        s8 noise;
};

struct mt76_sband {
        struct ieee80211_supported_band sband;
        struct mt76_channel_state *chan;
};

struct mt76_rate_power {
        union {
                struct {
                        s8 cck[4];
                        s8 ofdm[8];
                        s8 stbc[10];
                        s8 ht[16];
                        s8 vht[10];
                };
                s8 all[48];
        };
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM     BIT(31)
#define MT_VEND_TYPE_CFG        BIT(30)
#define MT_VEND_TYPE_MASK       (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)   (MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
        MT_VEND_DEV_MODE =      0x1,
        MT_VEND_WRITE =         0x2,
        MT_VEND_POWER_ON =      0x4,
        MT_VEND_MULTI_WRITE =   0x6,
        MT_VEND_MULTI_READ =    0x7,
        MT_VEND_READ_EEPROM =   0x9,
        MT_VEND_WRITE_FCE =     0x42,
        MT_VEND_WRITE_CFG =     0x46,
        MT_VEND_READ_CFG =      0x47,
        MT_VEND_READ_EXT =      0x63,
        MT_VEND_WRITE_EXT =     0x66,
        MT_VEND_FEATURE_SET =   0x91,
};

enum mt76u_in_ep {
        MT_EP_IN_PKT_RX,
        MT_EP_IN_CMD_RESP,
        __MT_EP_IN_MAX,
};

enum mt76u_out_ep {
        MT_EP_OUT_INBAND_CMD,
        MT_EP_OUT_AC_BE,
        MT_EP_OUT_AC_BK,
        MT_EP_OUT_AC_VI,
        MT_EP_OUT_AC_VO,
        MT_EP_OUT_HCCA,
        __MT_EP_OUT_MAX,
};

struct mt76_mcu {
        struct mutex mutex;
        u32 msg_seq;
        int timeout;

        struct sk_buff_head res_q;
        wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE       8
#define MT_RX_SG_MAX_SIZE       4
#define MT_NUM_TX_ENTRIES       256
#define MT_NUM_RX_ENTRIES       128
#define MCU_RESP_URB_SIZE       1024
struct mt76_usb {
        struct mutex usb_ctrl_mtx;
        u8 *data;
        u16 data_len;

        struct mt76_worker status_worker;
        struct mt76_worker rx_worker;

        struct work_struct stat_work;

        u8 out_ep[__MT_EP_OUT_MAX];
        u8 in_ep[__MT_EP_IN_MAX];
        bool sg_en;

        struct mt76u_mcu {
                u8 *data;
                /* multiple reads */
                struct mt76_reg_pair *rp;
                int rp_len;
                u32 base;
                bool burst;
        } mcu;
};

#define MT76S_XMIT_BUF_SZ       (16 * PAGE_SIZE)
struct mt76_sdio {
        struct mt76_worker txrx_worker;
        struct mt76_worker status_worker;
        struct mt76_worker net_worker;

        struct work_struct stat_work;

        u8 *xmit_buf[IEEE80211_NUM_ACS + 2];

        struct sdio_func *func;
        void *intr_data;

        struct {
                int pse_data_quota;
                int ple_data_quota;
                int pse_mcu_quota;
                int deficit;
        } sched;
};

struct mt76_mmio {
        void __iomem *regs;
        spinlock_t irq_lock;
        u32 irqmask;
};

struct mt76_rx_status {
        union {
                struct mt76_wcid *wcid;
                u16 wcid_idx;
        };

        u32 reorder_time;

        u32 ampdu_ref;
        u32 timestamp;

        u8 iv[6];

        u8 ext_phy:1;
        u8 aggr:1;
        u8 qos_ctl;
        u16 seqno;

        u16 freq;
        u32 flag;
        u8 enc_flags;
        u8 encoding:2, bw:3, he_ru:3;
        u8 he_gi:2, he_dcm:1;
        u8 amsdu:1, first_amsdu:1, last_amsdu:1;
        u8 rate_idx;
        u8 nss;
        u8 band;
        s8 signal;
        u8 chains;
        s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_testmode_ops {
        int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
        int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
                          enum mt76_testmode_state new_state);
        int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};

struct mt76_testmode_data {
        enum mt76_testmode_state state;

        u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
        struct sk_buff *tx_skb;

        u32 tx_count;
        u16 tx_mpdu_len;

        u8 tx_rate_mode;
        u8 tx_rate_idx;
        u8 tx_rate_nss;
        u8 tx_rate_sgi;
        u8 tx_rate_ldpc;
        u8 tx_rate_stbc;
        u8 tx_ltf;

        u8 tx_antenna_mask;
        u8 tx_spe_idx;

        u8 tx_duty_cycle;
        u32 tx_time;
        u32 tx_ipg;

        u32 freq_offset;

        u8 tx_power[4];
        u8 tx_power_control;

        u32 tx_pending;
        u32 tx_queued;
        u16 tx_queued_limit;
        u32 tx_done;
        struct {
                u64 packets[__MT_RXQ_MAX];
                u64 fcs_error[__MT_RXQ_MAX];
        } rx_stats;
};

struct mt76_vif {
        u8 idx;
        u8 omac_idx;
        u8 band_idx;
        u8 wmm_idx;
        u8 scan_seq_num;
};

struct mt76_phy {
        struct ieee80211_hw *hw;
        struct mt76_dev *dev;
        void *priv;

        unsigned long state;

        struct mt76_queue *q_tx[__MT_TXQ_MAX];

        struct cfg80211_chan_def chandef;
        struct ieee80211_channel *main_chan;

        struct mt76_channel_state *chan_state;
        ktime_t survey_time;

        struct mt76_hw_cap cap;
        struct mt76_sband sband_2g;
        struct mt76_sband sband_5g;

        u8 macaddr[ETH_ALEN];

        int txpower_cur;
        u8 antenna_mask;
        u16 chainmask;

#ifdef CONFIG_NL80211_TESTMODE
        struct mt76_testmode_data test;
#endif

        struct delayed_work mac_work;
        u8 mac_work_count;

        struct {
                struct sk_buff *head;
                struct sk_buff **tail;
                u16 seqno;
        } rx_amsdu[__MT_RXQ_MAX];
};

struct mt76_dev {
        struct mt76_phy phy; /* must be first */

        struct mt76_phy *phy2;

        struct ieee80211_hw *hw;

        spinlock_t lock;
        spinlock_t cc_lock;

        u32 cur_cc_bss_rx;

        struct mt76_rx_status rx_ampdu_status;
        u32 rx_ampdu_len;
        u32 rx_ampdu_ref;

        struct mutex mutex;

        const struct mt76_bus_ops *bus;
        const struct mt76_driver_ops *drv;
        const struct mt76_mcu_ops *mcu_ops;
        struct device *dev;

        struct mt76_mcu mcu;

        struct net_device napi_dev;
        struct net_device tx_napi_dev;
        spinlock_t rx_lock;
        struct napi_struct napi[__MT_RXQ_MAX];
        struct sk_buff_head rx_skb[__MT_RXQ_MAX];

        struct list_head txwi_cache;
        struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
        struct mt76_queue q_rx[__MT_RXQ_MAX];
        const struct mt76_queue_ops *queue_ops;
        int tx_dma_idx[4];

        struct mt76_worker tx_worker;
        struct napi_struct tx_napi;

        spinlock_t token_lock;
        struct idr token;
        int token_count;

        wait_queue_head_t tx_wait;
        struct sk_buff_head status_list;

        u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
        u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

        u32 vif_mask;

        struct mt76_wcid global_wcid;
        struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

        u32 rev;

        u32 aggr_stats[32];

        struct tasklet_struct pre_tbtt_tasklet;
        int beacon_int;
        u8 beacon_mask;

        struct debugfs_blob_wrapper eeprom;
        struct debugfs_blob_wrapper otp;

        struct mt76_rate_power rate_power;

        char alpha2[3];
        enum nl80211_dfs_regions region;

        u32 debugfs_reg;

        struct led_classdev led_cdev;
        char led_name[32];
        bool led_al;
        u8 led_pin;

        u8 csa_complete;

        u32 rxfilter;

#ifdef CONFIG_NL80211_TESTMODE
        const struct mt76_testmode_ops *test_ops;
        struct {
                const char *name;
                u32 offset;
        } test_mtd;
#endif
        struct workqueue_struct *wq;

        union {
                struct mt76_mmio mmio;
                struct mt76_usb usb;
                struct mt76_sdio sdio;
        };
};

struct mt76_power_limits {
        s8 cck[4];
        s8 ofdm[8];
        s8 mcs[4][10];
        s8 ru[7][12];
};

enum mt76_phy_type {
        MT_PHY_TYPE_CCK,
        MT_PHY_TYPE_OFDM,
        MT_PHY_TYPE_HT,
        MT_PHY_TYPE_HT_GF,
        MT_PHY_TYPE_VHT,
        MT_PHY_TYPE_HE_SU = 8,
        MT_PHY_TYPE_HE_EXT_SU,
        MT_PHY_TYPE_HE_TB,
        MT_PHY_TYPE_HE_MU,
};

#define CCK_RATE(_idx, _rate) {                                 \
        .bitrate = _rate,                                       \
        .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
        .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),            \
        .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),  \
}

#define OFDM_RATE(_idx, _rate) {                                \
        .bitrate = _rate,                                       \
        .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),           \
        .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),     \
}
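
/*
 * The shared mt76_rates table declared below is built from these
 * macros, roughly as follows (bitrates are in units of 100 kbit/s,
 * matching struct ieee80211_rate::bitrate):
 *
 *      struct ieee80211_rate mt76_rates[12] = {
 *              CCK_RATE(0, 10),
 *              CCK_RATE(1, 20),
 *              ...
 *              OFDM_RATE(11, 60),
 *              OFDM_RATE(15, 90),
 *              ...
 *      };
 */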

extern struct ieee80211_rate mt76_rates[12];

#define __mt76_rr(dev, ...)     (dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)     (dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)    (dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)        (dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)        (dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)    __mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)  __mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)       (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)       (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)      (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)  (dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)  (dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)    (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)    (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)      (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)    (dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)      mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)    mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)              \
        FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)        \
        mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)      \
        __mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
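
/*
 * Illustrative use of the register helpers; MT_EXAMPLE_REG and its
 * fields are hypothetical, chip-specific definitions:
 *
 *      mt76_set(dev, MT_EXAMPLE_REG, MT_EXAMPLE_EN);
 *      mt76_rmw_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_LIMIT, 8);
 *      val = mt76_get_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_LIMIT);
 *
 * The mt76_* variants expect the driver's private struct, which embeds
 * struct mt76_dev as "mt76"; the __mt76_* variants take a plain
 * struct mt76_dev pointer.
 */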

#define mt76_hw(dev) (dev)->mphy.hw

static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
{
        if (wcid < MT76_N_WCIDS &&
            mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
                return dev->phy2->hw;

        return dev->phy.hw;
}

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
                 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
                      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
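
/*
 * Both pollers spin until (reg & mask) == val or the timeout expires
 * (microseconds for __mt76_poll, milliseconds for __mt76_poll_msec),
 * returning false on timeout. A typical (illustrative) use, with the
 * register and busy bit standing in for real chip definitions:
 *
 *      if (!mt76_poll(dev, MT_EXAMPLE_DMA_CFG,
 *                     MT_EXAMPLE_DMA_BUSY, 0, 1000))
 *              return -ETIMEDOUT;
 */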

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
        return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
        return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev, ...)              (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)      (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)     (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)   (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)       (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)      (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)

#define mt76_for_each_q_rx(dev, i)      \
        for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
                    (dev)->q_rx[i].ndesc; i++)
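
/*
 * mt76_for_each_q_rx() walks the RX rings in order and stops at the
 * first one that was never allocated (ndesc == 0). Illustrative use,
 * e.g. from an interrupt handler:
 *
 *      int i;
 *
 *      mt76_for_each_q_rx(&dev->mt76, i)
 *              napi_schedule(&dev->mt76.napi[i]);
 */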

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
                                   const struct ieee80211_ops *ops,
                                   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
                         struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
                                const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
                      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
                         s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
                int ring_base);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
                                     int n_desc, int ring_base)
{
        struct mt76_queue *q;

        q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
        if (IS_ERR(q))
                return PTR_ERR(q);

        q->qid = qid;
        phy->q_tx[qid] = q;

        return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
                                      int n_desc, int ring_base)
{
        struct mt76_queue *q;

        q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
        if (IS_ERR(q))
                return PTR_ERR(q);

        q->qid = __MT_TXQ_MAX + qid;
        dev->q_mcu[qid] = q;

        return 0;
}

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
        if (phy_ext && dev->phy2)
                return dev->phy2;
        return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
        return mt76_dev_phy(dev, phy_ext)->hw;
}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
        return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
        return (val - 1) & (size - 1);
}
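
/*
 * Both helpers assume "size" is a power of two; e.g. with size == 8,
 * mt76_incr(7, 8) == 0 and mt76_decr(0, 8) == 7, which is what the
 * ring head/tail bookkeeping relies on.
 */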

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
        void *ptr = mtxq;

        return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
        void *ptr = wcid;

        if (!wcid || !wcid->sta)
                return NULL;

        return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
                     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
        return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
        struct mt76_rx_status mstat;
        u8 *data = skb->data;

        /* Alignment concerns */
        BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
        BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

        mstat = *((struct mt76_rx_status *)skb->cb);

        if (mstat.flag & RX_FLAG_RADIOTAP_HE)
                data += sizeof(struct ieee80211_radiotap_he);
        if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
                data += sizeof(struct ieee80211_radiotap_he_mu);

        return data;
}

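/*
 * Pad the 802.11 header to a multiple of 4 bytes by inserting two zero
 * bytes after it, keeping the payload 4-byte aligned for the hardware.
 * The skb must have at least 2 bytes of headroom available.
 */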
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
        int len = ieee80211_get_hdrlen_from_skb(skb);

        if (len % 4 == 0)
                return;

        skb_push(skb, 2);
        memmove(skb->data, skb->data + 2, len);

        skb->data[len] = 0;
        skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
        if (pktid & MT_PACKET_ID_HAS_RATE)
                return false;

        return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
        static const u8 nss_delta[4] = { 0, 6, 9, 12 };

        return nss_delta[nss - 1];
}

static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
        return phy->test.state != MT76_TM_STATE_OFF;
#else
        return false;
#endif
}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
                                        struct sk_buff *skb,
                                        struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
        if (skb == dev->phy.test.tx_skb)
                *hw = dev->phy.hw;
        else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
                *hw = dev->phy2->hw;
        else
                return false;
        return true;
#else
        return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
             struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
                         bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
                                  struct ieee80211_sta *sta,
                                  u16 tids, int nframes,
                                  enum ieee80211_frame_release_type reason,
                                  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
                    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
                       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
                         struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
                         __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
                           __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                           struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
                                       struct mt76_wcid *wcid, int pktid,
                                       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                             struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
                            struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
        __mt76_tx_complete_skb(dev, wcid, skb, NULL);
}

void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
                          bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta,
                   enum ieee80211_sta_state old_state,
                   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
                  struct ieee80211_supported_band *sband,
                  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
                           struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
                       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
        enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

        if (disable || phy->test.state == MT76_TM_STATE_OFF)
                state = MT76_TM_STATE_OFF;

        mt76_testmode_set_state(phy, state);
#endif
}

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hw *hw = dev->phy.hw;

        if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
                hw = dev->phy2->hw;

        info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

        return hw;
}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
                      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
                           struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
                            struct mt76_queue_entry *e);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
        return urb->status &&
               urb->status != -ECONNRESET &&
               urb->status != -ESHUTDOWN &&
               urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
               int timeout, int ep)
{
        struct usb_interface *uintf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(uintf);
        struct mt76_usb *usb = &dev->usb;
        unsigned int pipe;

        if (actual_len)
                pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
        else
                pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

        return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
               bool ext);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
               const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_queues(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);

struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
                   int data_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
                                      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
                              int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
                                  int cmd, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
                           int len);
static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
                  bool wait_resp)
{
        return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
                      bool wait_resp)
{
        return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}
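
/*
 * Typical MCU call pattern (illustrative; MCU_CMD_EXAMPLE and the
 * request layout are hypothetical, real command IDs and structures are
 * chip-specific):
 *
 *      struct {
 *              __le32 value;
 *      } __packed req = {
 *              .value = cpu_to_le32(1),
 *      };
 *
 *      err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_EXAMPLE,
 *                              &req, sizeof(req), true);
 *
 * With wait_resp set, the call blocks until mcu_parse_response()
 * accepts a matching reply or the mt76_mcu timeout expires.
 */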

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
                              struct ieee80211_channel *chan,
                              struct mt76_power_limits *dest,
                              s8 target_power);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
        spin_lock_bh(&dev->token_lock);
        __mt76_set_tx_blocked(dev, blocked);
        spin_unlock_bh(&dev->token_lock);
}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
        int token;

        spin_lock_bh(&dev->token_lock);
        token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
                          GFP_ATOMIC);
        spin_unlock_bh(&dev->token_lock);

        return token;
}

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
        struct mt76_txwi_cache *txwi;

        spin_lock_bh(&dev->token_lock);
        txwi = idr_remove(&dev->token, token);
        spin_unlock_bh(&dev->token_lock);

        return txwi;
}
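
/*
 * Token lifecycle sketch: the TX path allocates an ID for each pending
 * frame (mt76_token_get()/mt76_token_consume()) and writes it into the
 * TX descriptor; the hardware's TX-free/status event carries the same
 * ID back, and mt76_token_put()/mt76_token_release() look up and drop
 * the matching txwi entry:
 *
 *      token = mt76_token_consume(&dev->mt76, &txwi);
 *      if (token < 0)
 *              return -ENOMEM;
 *      ...
 *      txwi = mt76_token_release(&dev->mt76, token, &wake);
 */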

static inline int
mt76_get_next_pkt_id(struct mt76_wcid *wcid)
{
        wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
        if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
            wcid->packet_id == MT_PACKET_ID_NO_SKB)
                wcid->packet_id = MT_PACKET_ID_FIRST;

        return wcid->packet_id;
}
#endif