linux/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
   4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
   5 */
   6
   7#include "mt76x02.h"
   8#include "mt76x02_trace.h"
   9#include "trace.h"
  10
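/*
 * Reset MAC statistics. The RX/TX statistics registers and the
 * per-index aggregation counters appear to be clear-on-read, so
 * reading them back is enough to zero the hardware state; the TX
 * status FIFO is drained and the software aggregation stats are
 * cleared explicitly.
 */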
  11void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
  12{
  13        int i;
  14
  15        mt76_rr(dev, MT_RX_STAT_0);
  16        mt76_rr(dev, MT_RX_STAT_1);
  17        mt76_rr(dev, MT_RX_STAT_2);
  18        mt76_rr(dev, MT_TX_STA_0);
  19        mt76_rr(dev, MT_TX_STA_1);
  20        mt76_rr(dev, MT_TX_STA_2);
  21
  22        for (i = 0; i < 16; i++)
  23                mt76_rr(dev, MT_TX_AGG_CNT(i));
  24
  25        for (i = 0; i < 16; i++)
  26                mt76_rr(dev, MT_TX_STAT_FIFO);
  27
  28        memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
  29}
  30EXPORT_SYMBOL_GPL(mt76x02_mac_reset_counters);
  31
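/*
 * Translate a mac80211 key into the on-chip cipher type and copy the
 * key material into a fixed 32-byte, zero-padded buffer. Unsupported
 * or missing keys map to MT76X02_CIPHER_NONE.
 */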
  32static enum mt76x02_cipher_type
  33mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
  34{
  35        memset(key_data, 0, 32);
  36        if (!key)
  37                return MT76X02_CIPHER_NONE;
  38
  39        if (key->keylen > 32)
  40                return MT76X02_CIPHER_NONE;
  41
  42        memcpy(key_data, key->key, key->keylen);
  43
  44        switch (key->cipher) {
  45        case WLAN_CIPHER_SUITE_WEP40:
  46                return MT76X02_CIPHER_WEP40;
  47        case WLAN_CIPHER_SUITE_WEP104:
  48                return MT76X02_CIPHER_WEP104;
  49        case WLAN_CIPHER_SUITE_TKIP:
  50                return MT76X02_CIPHER_TKIP;
  51        case WLAN_CIPHER_SUITE_CCMP:
  52                return MT76X02_CIPHER_AES_CCMP;
  53        default:
  54                return MT76X02_CIPHER_NONE;
  55        }
  56}
  57
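/*
 * Program a shared (group) key slot for a vif: update the cipher mode
 * field for this vif/key index and write the key material into the
 * shared key table.
 */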
  58int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
  59                                 u8 key_idx, struct ieee80211_key_conf *key)
  60{
  61        enum mt76x02_cipher_type cipher;
  62        u8 key_data[32];
  63        u32 val;
  64
  65        cipher = mt76x02_mac_get_key_info(key, key_data);
  66        if (cipher == MT76X02_CIPHER_NONE && key)
  67                return -EOPNOTSUPP;
  68
  69        val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
  70        val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
  71        val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
  72        mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
  73
  74        mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
  75                     sizeof(key_data));
  76
  77        return 0;
  78}
  79EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
  80
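/*
 * Read back the IV/EIV words the hardware keeps for this WCID and
 * reconstruct the current packet number from them (TKIP and CCMP lay
 * the PN out differently), then store it in key->tx_pn so the software
 * counter does not fall behind the one the hardware has been using.
 */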
  81void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
  82                              struct ieee80211_key_conf *key)
  83{
  84        enum mt76x02_cipher_type cipher;
  85        u8 key_data[32];
  86        u32 iv, eiv;
  87        u64 pn;
  88
  89        cipher = mt76x02_mac_get_key_info(key, key_data);
  90        iv = mt76_rr(dev, MT_WCID_IV(idx));
  91        eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
  92
  93        pn = (u64)eiv << 16;
  94        if (cipher == MT76X02_CIPHER_TKIP) {
  95                pn |= (iv >> 16) & 0xff;
  96                pn |= (iv & 0xff) << 8;
  97        } else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
  98                pn |= iv & 0xffff;
  99        } else {
 100                return;
 101        }
 102
 103        atomic64_set(&key->tx_pn, pn);
 104}
 105
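/*
 * Program a per-WCID key: write the key material, set the pairwise
 * cipher in the WCID attributes and seed the IV/EIV words from the
 * current key->tx_pn. For CCMP the layout written to MT_WCID_IV is
 * iv[0..1] = PN0..PN1, iv[3] = ext-IV bit | keyidx << 6,
 * iv[4..7] = PN2..PN5.
 */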
 106int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
 107                             struct ieee80211_key_conf *key)
 108{
 109        enum mt76x02_cipher_type cipher;
 110        u8 key_data[32];
 111        u8 iv_data[8];
 112        u64 pn;
 113
 114        cipher = mt76x02_mac_get_key_info(key, key_data);
 115        if (cipher == MT76X02_CIPHER_NONE && key)
 116                return -EOPNOTSUPP;
 117
 118        mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
 119        mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
 120
 121        memset(iv_data, 0, sizeof(iv_data));
 122        if (key) {
 123                mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
 124                               !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
 125
 126                pn = atomic64_read(&key->tx_pn);
 127
 128                iv_data[3] = key->keyidx << 6;
 129                if (cipher >= MT76X02_CIPHER_TKIP) {
 130                        iv_data[3] |= 0x20;
 131                        put_unaligned_le32(pn >> 16, &iv_data[4]);
 132                }
 133
 134                if (cipher == MT76X02_CIPHER_TKIP) {
 135                        iv_data[0] = (pn >> 8) & 0xff;
 136                        iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
 137                        iv_data[2] = pn & 0xff;
 138                } else if (cipher >= MT76X02_CIPHER_AES_CCMP) {
 139                        put_unaligned_le16((pn & 0xffff), &iv_data[0]);
 140                }
 141        }
 142
 143        mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
 144
 145        return 0;
 146}
 147
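/*
 * Initialize a WCID table entry: record the owning BSS index in the
 * attribute register and, for entries that have an address slot
 * (idx < 128), write the station MAC address.
 */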
 148void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
 149                            u8 vif_idx, u8 *mac)
 150{
 151        struct mt76_wcid_addr addr = {};
 152        u32 attr;
 153
 154        attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
 155               FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
 156
 157        mt76_wr(dev, MT_WCID_ATTR(idx), attr);
 158
 159        if (idx >= 128)
 160                return;
 161
 162        if (mac)
 163                memcpy(addr.macaddr, mac, ETH_ALEN);
 164
 165        mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
 166}
 167EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
 168
 169void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
 170{
 171        u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
 172        u32 bit = MT_WCID_DROP_MASK(idx);
 173
 174        /* prevent unnecessary writes */
 175        if ((val & bit) != (bit * drop))
 176                mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
 177}
 178
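/*
 * Encode a mac80211 TX rate into the 16-bit hardware rate word used in
 * the TXWI and WCID tables (rate index, PHY mode, bandwidth, short GI).
 * The number of spatial streams is returned separately through nss_val.
 */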
 179static __le16
 180mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
 181                        const struct ieee80211_tx_rate *rate, u8 *nss_val)
 182{
 183        u8 phy, rate_idx, nss, bw = 0;
 184        u16 rateval;
 185
 186        if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
 187                rate_idx = rate->idx;
 188                nss = 1 + (rate->idx >> 4);
 189                phy = MT_PHY_TYPE_VHT;
 190                if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
 191                        bw = 2;
 192                else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 193                        bw = 1;
 194        } else if (rate->flags & IEEE80211_TX_RC_MCS) {
 195                rate_idx = rate->idx;
 196                nss = 1 + (rate->idx >> 3);
 197                phy = MT_PHY_TYPE_HT;
 198                if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 199                        phy = MT_PHY_TYPE_HT_GF;
 200                if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
 201                        bw = 1;
 202        } else {
 203                const struct ieee80211_rate *r;
 204                int band = dev->mphy.chandef.chan->band;
 205                u16 val;
 206
 207                r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
 208                if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 209                        val = r->hw_value_short;
 210                else
 211                        val = r->hw_value;
 212
 213                phy = val >> 8;
 214                rate_idx = val & 0xff;
 215                nss = 1;
 216        }
 217
 218        rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
 219        rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
 220        rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
 221        if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
 222                rateval |= MT_RXWI_RATE_SGI;
 223
 224        *nss_val = nss;
 225        return cpu_to_le16(rateval);
 226}
 227
 228void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
 229                               const struct ieee80211_tx_rate *rate)
 230{
 231        s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
 232        __le16 rateval;
 233        u32 tx_info;
 234        s8 nss;
 235
 236        rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
 237        tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
 238                  FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
 239                  FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
 240                  MT_WCID_TX_INFO_SET;
 241        wcid->tx_info = tx_info;
 242}
 243
 244void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
 245{
 246        if (enable)
 247                mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
 248        else
 249                mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
 250}
 251
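/*
 * Fetch one entry from the TX status FIFO. Returns false when the FIFO
 * is empty (valid bit clear). The EXT register is read first,
 * presumably because reading MT_TX_STAT_FIFO is what pops the entry.
 */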
 252bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
 253                                struct mt76x02_tx_status *stat)
 254{
 255        u32 stat1, stat2;
 256
 257        stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
 258        stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
 259
 260        stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
 261        if (!stat->valid)
 262                return false;
 263
 264        stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
 265        stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
 266        stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
 267        stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
 268        stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
 269
 270        stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
 271        stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
 272
 273        trace_mac_txstat_fetch(dev, stat);
 274
 275        return true;
 276}
 277
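/*
 * Decode a hardware rate word back into an ieee80211_tx_rate, the
 * inverse of mt76x02_mac_tx_rate_val(). Returns -EINVAL for unknown
 * PHY modes or bandwidths.
 */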
 278static int
 279mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
 280                            enum nl80211_band band)
 281{
 282        u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
 283
 284        txrate->idx = 0;
 285        txrate->flags = 0;
 286        txrate->count = 1;
 287
 288        switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
 289        case MT_PHY_TYPE_OFDM:
 290                if (band == NL80211_BAND_2GHZ)
 291                        idx += 4;
 292
 293                txrate->idx = idx;
 294                return 0;
 295        case MT_PHY_TYPE_CCK:
 296                if (idx >= 8)
 297                        idx -= 8;
 298
 299                txrate->idx = idx;
 300                return 0;
 301        case MT_PHY_TYPE_HT_GF:
 302                txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
 303                fallthrough;
 304        case MT_PHY_TYPE_HT:
 305                txrate->flags |= IEEE80211_TX_RC_MCS;
 306                txrate->idx = idx;
 307                break;
 308        case MT_PHY_TYPE_VHT:
 309                txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
 310                txrate->idx = idx;
 311                break;
 312        default:
 313                return -EINVAL;
 314        }
 315
 316        switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
 317        case MT_PHY_BW_20:
 318                break;
 319        case MT_PHY_BW_40:
 320                txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 321                break;
 322        case MT_PHY_BW_80:
 323                txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
 324                break;
 325        default:
 326                return -EINVAL;
 327        }
 328
 329        if (rate & MT_RXWI_RATE_SGI)
 330                txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
 331
 332        return 0;
 333}
 334
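/*
 * Fill the TXWI descriptor for a frame: rate, TX power, per-WCID rate
 * overrides, an optional software-generated CCMP IV, stream/STBC/LDPC
 * settings, ACK and A-MPDU parameters, and the timestamp flag for
 * beacons and probe responses.
 */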
 335void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
 336                            struct sk_buff *skb, struct mt76_wcid *wcid,
 337                            struct ieee80211_sta *sta, int len)
 338{
 339        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 340        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 341        struct ieee80211_tx_rate *rate = &info->control.rates[0];
 342        struct ieee80211_key_conf *key = info->control.hw_key;
 343        u32 wcid_tx_info;
 344        u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
 345        u16 txwi_flags = 0;
 346        u8 nss;
 347        s8 txpwr_adj, max_txpwr_adj;
 348        u8 ccmp_pn[8], nstreams = dev->mphy.chainmask & 0xf;
 349
 350        memset(txwi, 0, sizeof(*txwi));
 351
 352        mt76_tx_check_agg_ssn(sta, skb);
 353
 354        if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
 355            ieee80211_has_protected(hdr->frame_control)) {
 356                wcid = NULL;
 357                ieee80211_get_tx_rates(info->control.vif, sta, skb,
 358                                       info->control.rates, 1);
 359        }
 360
 361        if (wcid)
 362                txwi->wcid = wcid->idx;
 363        else
 364                txwi->wcid = 0xff;
 365
 366        if (wcid && wcid->sw_iv && key) {
 367                u64 pn = atomic64_inc_return(&key->tx_pn);
 368
 369                ccmp_pn[0] = pn;
 370                ccmp_pn[1] = pn >> 8;
 371                ccmp_pn[2] = 0;
 372                ccmp_pn[3] = 0x20 | (key->keyidx << 6);
 373                ccmp_pn[4] = pn >> 16;
 374                ccmp_pn[5] = pn >> 24;
 375                ccmp_pn[6] = pn >> 32;
 376                ccmp_pn[7] = pn >> 40;
 377                txwi->iv = *((__le32 *)&ccmp_pn[0]);
 378                txwi->eiv = *((__le32 *)&ccmp_pn[4]);
 379        }
 380
 381        if (wcid && (rate->idx < 0 || !rate->count)) {
 382                wcid_tx_info = wcid->tx_info;
 383                txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
 384                max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
 385                                          wcid_tx_info);
 386                nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
 387        } else {
 388                txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
 389                max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
 390        }
 391
 392        txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
 393                                             max_txpwr_adj);
 394        txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
 395
 396        if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
 397                txwi->txstream = 0x13;
 398        else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
 399                 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
 400                txwi->txstream = 0x93;
 401
 402        if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
 403                txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
 404        if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
 405                txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
 406        if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
 407                txwi_flags |= MT_TXWI_FLAGS_MMPS;
 408        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
 409                txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
 410        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
 411                txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
 412        if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
 413                u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
 414                u8 ampdu_density = sta->ht_cap.ampdu_density;
 415
 416                ba_size <<= sta->ht_cap.ampdu_factor;
 417                ba_size = min_t(int, 63, ba_size - 1);
 418                if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
 419                        ba_size = 0;
 420                txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
 421
 422                if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
 423                        ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
 424
 425                txwi_flags |= MT_TXWI_FLAGS_AMPDU |
 426                         FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, ampdu_density);
 427        }
 428
 429        if (ieee80211_is_probe_resp(hdr->frame_control) ||
 430            ieee80211_is_beacon(hdr->frame_control))
 431                txwi_flags |= MT_TXWI_FLAGS_TS;
 432
 433        txwi->flags |= cpu_to_le16(txwi_flags);
 434        txwi->len_ctl = cpu_to_le16(len);
 435}
 436EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
 437
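/*
 * Derive the fallback rate for retry slot idx from the previous slot:
 * step down one MCS (or one spatial stream at VHT MCS 0), with HT
 * MCS 8 falling back to MCS 0 and legacy rates stepping down one index.
 */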
 438static void
 439mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
 440{
 441        u8 mcs, nss;
 442
 443        if (!idx)
 444                return;
 445
 446        rates += idx - 1;
 447        rates[1] = rates[0];
 448        switch (phy) {
 449        case MT_PHY_TYPE_VHT:
 450                mcs = ieee80211_rate_get_vht_mcs(rates);
 451                nss = ieee80211_rate_get_vht_nss(rates);
 452
 453                if (mcs == 0)
 454                        nss = max_t(int, nss - 1, 1);
 455                else
 456                        mcs--;
 457
 458                ieee80211_rate_set_vht(rates + 1, mcs, nss);
 459                break;
 460        case MT_PHY_TYPE_HT_GF:
 461        case MT_PHY_TYPE_HT:
 462                /* MCS 8 falls back to MCS 0 */
 463                if (rates[0].idx == 8) {
 464                        rates[1].idx = 0;
 465                        break;
 466                }
 467                fallthrough;
 468        default:
 469                rates[1].idx = max_t(int, rates[0].idx - 1, 0);
 470                break;
 471        }
 472}
 473
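/*
 * Convert a raw TX status entry into mac80211 TX info covering
 * n_frames of an aggregate: recover the first attempted rate (from the
 * packet id or the cached WCID rate), fill the retry chain down to the
 * final rate reported by the hardware and set the A-MPDU/ACK flags.
 */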
 474static void
 475mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
 476                           struct ieee80211_tx_info *info,
 477                           struct mt76x02_tx_status *st, int n_frames)
 478{
 479        struct ieee80211_tx_rate *rate = info->status.rates;
 480        struct ieee80211_tx_rate last_rate;
 481        u16 first_rate;
 482        int retry = st->retry;
 483        int phy;
 484        int i;
 485
 486        if (!n_frames)
 487                return;
 488
 489        phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
 490
 491        if (st->pktid & MT_PACKET_ID_HAS_RATE) {
 492                first_rate = st->rate & ~MT_PKTID_RATE;
 493                first_rate |= st->pktid & MT_PKTID_RATE;
 494
 495                mt76x02_mac_process_tx_rate(&rate[0], first_rate,
 496                                            dev->mphy.chandef.chan->band);
 497        } else if (rate[0].idx < 0) {
 498                if (!msta)
 499                        return;
 500
 501                mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
 502                                            dev->mphy.chandef.chan->band);
 503        }
 504
 505        mt76x02_mac_process_tx_rate(&last_rate, st->rate,
 506                                    dev->mphy.chandef.chan->band);
 507
 508        for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
 509                retry--;
 510                if (i + 1 == ARRAY_SIZE(info->status.rates)) {
 511                        info->status.rates[i] = last_rate;
 512                        info->status.rates[i].count = max_t(int, retry, 1);
 513                        break;
 514                }
 515
 516                mt76x02_tx_rate_fallback(info->status.rates, i, phy);
 517                if (info->status.rates[i].idx == last_rate.idx)
 518                        break;
 519        }
 520
 521        if (i + 1 < ARRAY_SIZE(info->status.rates)) {
 522                info->status.rates[i + 1].idx = -1;
 523                info->status.rates[i + 1].count = 0;
 524        }
 525
 526        info->status.ampdu_len = n_frames;
 527        info->status.ampdu_ack_len = st->success ? n_frames : 0;
 528
 529        if (st->aggr)
 530                info->flags |= IEEE80211_TX_CTL_AMPDU |
 531                               IEEE80211_TX_STAT_AMPDU;
 532
 533        if (!st->ack_req)
 534                info->flags |= IEEE80211_TX_CTL_NO_ACK;
 535        else if (st->success)
 536                info->flags |= IEEE80211_TX_STAT_ACK;
 537}
 538
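/*
 * Report TX status to mac80211. Entries that match the previously
 * cached rate/retry state for the same station are only counted
 * (msta->n_frames) and reported in one batch once the state changes,
 * which keeps the per-frame overhead low. TX airtime is accounted
 * here as well.
 */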
 539void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 540                            struct mt76x02_tx_status *stat, u8 *update)
 541{
 542        struct ieee80211_tx_info info = {};
 543        struct ieee80211_tx_status status = {
 544                .info = &info
 545        };
 546        static const u8 ac_to_tid[4] = {
 547                [IEEE80211_AC_BE] = 0,
 548                [IEEE80211_AC_BK] = 1,
 549                [IEEE80211_AC_VI] = 4,
 550                [IEEE80211_AC_VO] = 6
 551        };
 552        struct mt76_wcid *wcid = NULL;
 553        struct mt76x02_sta *msta = NULL;
 554        struct mt76_dev *mdev = &dev->mt76;
 555        struct sk_buff_head list;
 556        u32 duration = 0;
 557        u8 cur_pktid;
 558        u32 ac = 0;
 559        int len = 0;
 560
 561        if (stat->pktid == MT_PACKET_ID_NO_ACK)
 562                return;
 563
 564        rcu_read_lock();
 565
 566        if (stat->wcid < MT76x02_N_WCIDS)
 567                wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
 568
 569        if (wcid && wcid->sta) {
 570                void *priv;
 571
 572                priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
 573                status.sta = container_of(priv, struct ieee80211_sta,
 574                                          drv_priv);
 575        }
 576
 577        mt76_tx_status_lock(mdev, &list);
 578
 579        if (wcid) {
 580                if (mt76_is_skb_pktid(stat->pktid))
 581                        status.skb = mt76_tx_status_skb_get(mdev, wcid,
 582                                                            stat->pktid, &list);
 583                if (status.skb)
 584                        status.info = IEEE80211_SKB_CB(status.skb);
 585        }
 586
 587        if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
 588                mt76_tx_status_unlock(mdev, &list);
 589                goto out;
 590        }
 591
 592
 593        if (msta && stat->aggr && !status.skb) {
 594                u32 stat_val, stat_cache;
 595
 596                stat_val = stat->rate;
 597                stat_val |= ((u32)stat->retry) << 16;
 598                stat_cache = msta->status.rate;
 599                stat_cache |= ((u32)msta->status.retry) << 16;
 600
 601                if (*update == 0 && stat_val == stat_cache &&
 602                    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
 603                        msta->n_frames++;
 604                        mt76_tx_status_unlock(mdev, &list);
 605                        goto out;
 606                }
 607
 608                cur_pktid = msta->status.pktid;
 609                mt76x02_mac_fill_tx_status(dev, msta, status.info,
 610                                           &msta->status, msta->n_frames);
 611
 612                msta->status = *stat;
 613                msta->n_frames = 1;
 614                *update = 0;
 615        } else {
 616                cur_pktid = stat->pktid;
 617                mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
 618                *update = 1;
 619        }
 620
 621        if (status.skb) {
 622                info = *status.info;
 623                len = status.skb->len;
 624                ac = skb_get_queue_mapping(status.skb);
 625                mt76_tx_status_skb_done(mdev, status.skb, &list);
 626        } else if (msta) {
 627                len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
 628                ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
 629        }
 630
 631        mt76_tx_status_unlock(mdev, &list);
 632
 633        if (!status.skb)
 634                ieee80211_tx_status_ext(mt76_hw(dev), &status);
 635
 636        if (!len)
 637                goto out;
 638
 639        duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);
 640
 641        spin_lock_bh(&dev->mt76.cc_lock);
 642        dev->tx_airtime += duration;
 643        spin_unlock_bh(&dev->mt76.cc_lock);
 644
 645        if (msta)
 646                ieee80211_sta_register_airtime(status.sta, ac_to_tid[ac], duration, 0);
 647
 648out:
 649        rcu_read_unlock();
 650}
 651
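/*
 * Decode the RXWI rate word into the mt76_rx_status fields consumed by
 * mac80211: rate index/MCS, preamble, HT/VHT encoding, NSS, guard
 * interval, STBC, LDPC and bandwidth.
 */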
 652static int
 653mt76x02_mac_process_rate(struct mt76x02_dev *dev,
 654                         struct mt76_rx_status *status,
 655                         u16 rate)
 656{
 657        u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
 658
 659        switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
 660        case MT_PHY_TYPE_OFDM:
 661                if (idx >= 8)
 662                        idx = 0;
 663
 664                if (status->band == NL80211_BAND_2GHZ)
 665                        idx += 4;
 666
 667                status->rate_idx = idx;
 668                return 0;
 669        case MT_PHY_TYPE_CCK:
 670                if (idx >= 8) {
 671                        idx -= 8;
 672                        status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
 673                }
 674
 675                if (idx >= 4)
 676                        idx = 0;
 677
 678                status->rate_idx = idx;
 679                return 0;
 680        case MT_PHY_TYPE_HT_GF:
 681                status->enc_flags |= RX_ENC_FLAG_HT_GF;
 682                fallthrough;
 683        case MT_PHY_TYPE_HT:
 684                status->encoding = RX_ENC_HT;
 685                status->rate_idx = idx;
 686                break;
 687        case MT_PHY_TYPE_VHT: {
 688                u8 n_rxstream = dev->mphy.chainmask & 0xf;
 689
 690                status->encoding = RX_ENC_VHT;
 691                status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
 692                status->nss = min_t(u8, n_rxstream,
 693                                    FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
 694                break;
 695        }
 696        default:
 697                return -EINVAL;
 698        }
 699
 700        if (rate & MT_RXWI_RATE_LDPC)
 701                status->enc_flags |= RX_ENC_FLAG_LDPC;
 702
 703        if (rate & MT_RXWI_RATE_SGI)
 704                status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 705
 706        if (rate & MT_RXWI_RATE_STBC)
 707                status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
 708
 709        switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
 710        case MT_PHY_BW_20:
 711                break;
 712        case MT_PHY_BW_40:
 713                status->bw = RATE_INFO_BW_40;
 714                break;
 715        case MT_PHY_BW_80:
 716                status->bw = RATE_INFO_BW_80;
 717                break;
 718        default:
 719                break;
 720        }
 721
 722        return 0;
 723}
 724
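/*
 * Program the device MAC address and primary BSSID (falling back to a
 * random address if the supplied one is invalid), enable the
 * multi-BSSID mode used for the extra beacon slots and clear the
 * per-slot BSSID entries.
 */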
 725void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
 726{
 727        static const u8 null_addr[ETH_ALEN] = {};
 728        int i;
 729
 730        ether_addr_copy(dev->mphy.macaddr, addr);
 731
 732        if (!is_valid_ether_addr(dev->mphy.macaddr)) {
 733                eth_random_addr(dev->mphy.macaddr);
 734                dev_info(dev->mt76.dev,
 735                         "Invalid MAC address, using random address %pM\n",
 736                         dev->mphy.macaddr);
 737        }
 738
 739        mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mphy.macaddr));
 740        mt76_wr(dev, MT_MAC_ADDR_DW1,
 741                get_unaligned_le16(dev->mphy.macaddr + 4) |
 742                FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
 743
 744        mt76_wr(dev, MT_MAC_BSSID_DW0,
 745                get_unaligned_le32(dev->mphy.macaddr));
 746        mt76_wr(dev, MT_MAC_BSSID_DW1,
 747                get_unaligned_le16(dev->mphy.macaddr + 4) |
 748                FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
 749                MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
 750        /* enable 7 additional beacon slots and control them with bypass mask */
 751        mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);
 752
 753        for (i = 0; i < 16; i++)
 754                mt76x02_mac_set_bssid(dev, i, null_addr);
 755}
 756EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
 757
 758static int
 759mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
 760{
 761        struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;
 762
 763        rssi += cal->rssi_offset[chain];
 764        rssi -= cal->lna_gain;
 765
 766        return rssi;
 767}
 768
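/*
 * Parse the RXWI of a received frame: strip L2 padding and, for
 * hardware-decrypted frames, the security IV (a copy is kept in
 * status->iv for the driver's CCMP PN check), look up the station,
 * record A-MPDU and RSSI information and decode the RX rate.
 */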
 769int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
 770                           void *rxi)
 771{
 772        struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
 773        struct ieee80211_hdr *hdr;
 774        struct mt76x02_rxwi *rxwi = rxi;
 775        struct mt76x02_sta *sta;
 776        u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
 777        u32 ctl = le32_to_cpu(rxwi->ctl);
 778        u16 rate = le16_to_cpu(rxwi->rate);
 779        u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
 780        bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
 781        int pad_len = 0, nstreams = dev->mphy.chainmask & 0xf;
 782        s8 signal;
 783        u8 pn_len;
 784        u8 wcid;
 785        int len;
 786
 787        if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
 788                return -EINVAL;
 789
 790        if (rxinfo & MT_RXINFO_L2PAD)
 791                pad_len += 2;
 792
 793        if (rxinfo & MT_RXINFO_DECRYPT) {
 794                status->flag |= RX_FLAG_DECRYPTED;
 795                status->flag |= RX_FLAG_MMIC_STRIPPED;
 796                status->flag |= RX_FLAG_MIC_STRIPPED;
 797                status->flag |= RX_FLAG_IV_STRIPPED;
 798        }
 799
 800        wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
 801        sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
 802        status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);
 803
 804        len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
 805        pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
 806        if (pn_len) {
 807                int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
 808                u8 *data = skb->data + offset;
 809
 810                status->iv[0] = data[7];
 811                status->iv[1] = data[6];
 812                status->iv[2] = data[5];
 813                status->iv[3] = data[4];
 814                status->iv[4] = data[1];
 815                status->iv[5] = data[0];
 816
 817                /*
 818                 * Driver CCMP validation can't deal with fragments.
 819                 * Let mac80211 take care of it.
 820                 */
 821                if (rxinfo & MT_RXINFO_FRAG) {
 822                        status->flag &= ~RX_FLAG_IV_STRIPPED;
 823                } else {
 824                        pad_len += pn_len << 2;
 825                        len -= pn_len << 2;
 826                }
 827        }
 828
 829        mt76x02_remove_hdr_pad(skb, pad_len);
 830
 831        if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
 832                status->aggr = true;
 833
 834        if (rxinfo & MT_RXINFO_AMPDU) {
 835                status->flag |= RX_FLAG_AMPDU_DETAILS;
 836                status->ampdu_ref = dev->ampdu_ref;
 837
 838                /*
 839                 * When receiving an A-MPDU subframe and RSSI info is not valid,
 840                 * we can assume that more subframes belonging to the same A-MPDU
 841                 * are coming. The last one will have valid RSSI info
 842                 */
 843                if (rxinfo & MT_RXINFO_RSSI) {
 844                        if (!++dev->ampdu_ref)
 845                                dev->ampdu_ref++;
 846                }
 847        }
 848
 849        if (WARN_ON_ONCE(len > skb->len))
 850                return -EINVAL;
 851
 852        pskb_trim(skb, len);
 853
 854        status->chains = BIT(0);
 855        signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
 856        status->chain_signal[0] = signal;
 857        if (nstreams > 1) {
 858                status->chains |= BIT(1);
 859                status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
 860                                                               rxwi->rssi[1],
 861                                                               1);
 862                signal = max_t(s8, signal, status->chain_signal[1]);
 863        }
 864        status->signal = signal;
 865        status->freq = dev->mphy.chandef.chan->center_freq;
 866        status->band = dev->mphy.chandef.chan->band;
 867
 868        hdr = (struct ieee80211_hdr *)skb->data;
 869        status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
 870        status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
 871
 872        return mt76x02_mac_process_rate(dev, status, rate);
 873}
 874
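/*
 * Drain the hardware TX status FIFO. In IRQ context the entries are
 * only pushed into dev->txstatus_fifo and left for later processing;
 * otherwise they are reported immediately.
 */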
 875void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
 876{
 877        struct mt76x02_tx_status stat = {};
 878        u8 update = 1;
 879        bool ret;
 880
 881        if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
 882                return;
 883
 884        trace_mac_txstat_poll(dev);
 885
 886        while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
 887                if (!spin_trylock(&dev->txstatus_fifo_lock))
 888                        break;
 889
 890                ret = mt76x02_mac_load_tx_status(dev, &stat);
 891                spin_unlock(&dev->txstatus_fifo_lock);
 892
 893                if (!ret)
 894                        break;
 895
 896                if (!irq) {
 897                        mt76x02_send_tx_status(dev, &stat, &update);
 898                        continue;
 899                }
 900
 901                kfifo_put(&dev->txstatus_fifo, stat);
 902        }
 903}
 904
 905void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
 906{
 907        struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 908        struct mt76x02_txwi *txwi;
 909        u8 *txwi_ptr;
 910
 911        if (!e->txwi) {
 912                dev_kfree_skb_any(e->skb);
 913                return;
 914        }
 915
 916        mt76x02_mac_poll_tx_status(dev, false);
 917
 918        txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
 919        txwi = (struct mt76x02_txwi *)txwi_ptr;
 920        trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
 921
 922        mt76_tx_complete_skb(mdev, e->wcid, e->skb);
 923}
 924EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
 925
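/*
 * Apply an RTS threshold: program the length threshold and enable or
 * disable RTS-based protection in the CCK/OFDM protection registers
 * (val == ~0 disables it).
 */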
 926void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
 927{
 928        u32 data = 0;
 929
 930        if (val != ~0)
 931                data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
 932                       MT_PROT_CFG_RTS_THRESH;
 933
 934        mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
 935
 936        mt76_rmw(dev, MT_CCK_PROT_CFG,
 937                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
 938        mt76_rmw(dev, MT_OFDM_PROT_CFG,
 939                 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
 940}
 941
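/*
 * Configure the six legacy/HT protection registers starting at
 * MT_CCK_PROT_CFG plus the three VHT entries: pick CCK or OFDM
 * protection rates depending on legacy_prot, and enable RTS/CTS or
 * CTS-to-self according to the HT operation mode, the RTS threshold
 * and the presence of non-greenfield stations.
 */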
 942void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
 943                                   int ht_mode)
 944{
 945        int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
 946        bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
 947        u32 prot[6];
 948        u32 vht_prot[3];
 949        int i;
 950        u16 rts_thr;
 951
 952        for (i = 0; i < ARRAY_SIZE(prot); i++) {
 953                prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
 954                prot[i] &= ~MT_PROT_CFG_CTRL;
 955                if (i >= 2)
 956                        prot[i] &= ~MT_PROT_CFG_RATE;
 957        }
 958
 959        for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
 960                vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
 961                vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
 962        }
 963
 964        rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);
 965
 966        if (rts_thr != 0xffff)
 967                prot[0] |= MT_PROT_CTRL_RTS_CTS;
 968
 969        if (legacy_prot) {
 970                prot[1] |= MT_PROT_CTRL_CTS2SELF;
 971
 972                prot[2] |= MT_PROT_RATE_CCK_11;
 973                prot[3] |= MT_PROT_RATE_CCK_11;
 974                prot[4] |= MT_PROT_RATE_CCK_11;
 975                prot[5] |= MT_PROT_RATE_CCK_11;
 976
 977                vht_prot[0] |= MT_PROT_RATE_CCK_11;
 978                vht_prot[1] |= MT_PROT_RATE_CCK_11;
 979                vht_prot[2] |= MT_PROT_RATE_CCK_11;
 980        } else {
 981                if (rts_thr != 0xffff)
 982                        prot[1] |= MT_PROT_CTRL_RTS_CTS;
 983
 984                prot[2] |= MT_PROT_RATE_OFDM_24;
 985                prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
 986                prot[4] |= MT_PROT_RATE_OFDM_24;
 987                prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
 988
 989                vht_prot[0] |= MT_PROT_RATE_OFDM_24;
 990                vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
 991                vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
 992        }
 993
 994        switch (mode) {
 995        case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
 996        case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
 997                prot[2] |= MT_PROT_CTRL_RTS_CTS;
 998                prot[3] |= MT_PROT_CTRL_RTS_CTS;
 999                prot[4] |= MT_PROT_CTRL_RTS_CTS;
1000                prot[5] |= MT_PROT_CTRL_RTS_CTS;
1001                vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
1002                vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
1003                vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
1004                break;
1005        case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
1006                prot[3] |= MT_PROT_CTRL_RTS_CTS;
1007                prot[5] |= MT_PROT_CTRL_RTS_CTS;
1008                vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
1009                vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
1010                break;
1011        }
1012
1013        if (non_gf) {
1014                prot[4] |= MT_PROT_CTRL_RTS_CTS;
1015                prot[5] |= MT_PROT_CTRL_RTS_CTS;
1016        }
1017
1018        for (i = 0; i < ARRAY_SIZE(prot); i++)
1019                mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
1020
1021        for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
1022                mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
1023}
1024
1025void mt76x02_update_channel(struct mt76_phy *mphy)
1026{
1027        struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
1028        struct mt76_channel_state *state;
1029
1030        state = mphy->chan_state;
1031        state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
1032
1033        spin_lock_bh(&dev->mt76.cc_lock);
1034        state->cc_tx += dev->tx_airtime;
1035        dev->tx_airtime = 0;
1036        spin_unlock_bh(&dev->mt76.cc_lock);
1037}
1038EXPORT_SYMBOL_GPL(mt76x02_update_channel);
1039
1040static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
1041{
1042        u32 val = mt76_rr(dev, 0x10f4);
1043
1044        if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
1045                return;
1046
1047        dev_err(dev->mt76.dev, "mac specific condition occurred\n");
1048
1049        mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
1050        udelay(10);
1051        mt76_wr(dev, MT_MAC_SYS_CTRL,
1052                MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
1053}
1054
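/*
 * Block or unblock transmission for energy-detect CCA: toggle the MAC
 * TX enable, auto-response and the PA/LNA control pins together and
 * track the state in dev->ed_tx_blocked.
 */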
1055static void
1056mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
1057{
1058        if (enable) {
1059                u32 data;
1060
1061                mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
1062                mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
1063                /* enable pa-lna */
1064                data = mt76_rr(dev, MT_TX_PIN_CFG);
1065                data |= MT_TX_PIN_CFG_TXANT |
1066                        MT_TX_PIN_CFG_RXANT |
1067                        MT_TX_PIN_RFTR_EN |
1068                        MT_TX_PIN_TRSW_EN;
1069                mt76_wr(dev, MT_TX_PIN_CFG, data);
1070        } else {
1071                mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
1072                mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
1073                /* disable pa-lna */
1074                mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
1075                mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
1076        }
1077        dev->ed_tx_blocked = !enable;
1078}
1079
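/*
 * (Re)initialize energy-detect CCA for the current channel: when the
 * regulatory ED monitor is active, program the per-band ED threshold
 * and enable ED-based TXOP control; otherwise restore the default AGC
 * settings. TX is re-enabled and the learning phase and CCA timer are
 * restarted in both cases.
 */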
1080void mt76x02_edcca_init(struct mt76x02_dev *dev)
1081{
1082        dev->ed_trigger = 0;
1083        dev->ed_silent = 0;
1084
1085        if (dev->ed_monitor) {
1086                struct ieee80211_channel *chan = dev->mphy.chandef.chan;
1087                u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
1088
1089                mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
1090                mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
1091                mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
1092                         ed_th << 8 | ed_th);
1093                mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
1094        } else {
1095                mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
1096                mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
1097                if (is_mt76x2(dev)) {
1098                        mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
1099                        mt76_set(dev, MT_TXOP_HLDR_ET,
1100                                 MT_TXOP_HLDR_TX40M_BLK_EN);
1101                } else {
1102                        mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
1103                        mt76_clear(dev, MT_TXOP_HLDR_ET,
1104                                   MT_TXOP_HLDR_TX40M_BLK_EN);
1105                }
1106        }
1107        mt76x02_edcca_tx_enable(dev, true);
1108        dev->ed_monitor_learning = true;
1109
1110        /* clear previous CCA timer value */
1111        mt76_rr(dev, MT_ED_CCA_TIMER);
1112        dev->ed_time = ktime_get_boottime();
1113}
1114EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
1115
1116#define MT_EDCCA_TH             92
1117#define MT_EDCCA_BLOCK_TH       2
1118#define MT_EDCCA_LEARN_TH       50
1119#define MT_EDCCA_LEARN_CCA      180
1120#define MT_EDCCA_LEARN_TIMEOUT  (20 * HZ)
1121
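/*
 * Periodic ED CCA evaluation: compute the busy percentage from the CCA
 * timer since the last check and track consecutive busy/quiet
 * intervals. TX is only blocked once the learning heuristic concludes
 * the trigger is genuine (AGC at lowest gain, high false-CCA count,
 * persistent trigger) and is re-enabled when the medium goes quiet
 * again.
 */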
1122static void mt76x02_edcca_check(struct mt76x02_dev *dev)
1123{
1124        ktime_t cur_time;
1125        u32 active, val, busy;
1126
1127        cur_time = ktime_get_boottime();
1128        val = mt76_rr(dev, MT_ED_CCA_TIMER);
1129
1130        active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
1131        dev->ed_time = cur_time;
1132
1133        busy = (val * 100) / active;
1134        busy = min_t(u32, busy, 100);
1135
1136        if (busy > MT_EDCCA_TH) {
1137                dev->ed_trigger++;
1138                dev->ed_silent = 0;
1139        } else {
1140                dev->ed_silent++;
1141                dev->ed_trigger = 0;
1142        }
1143
1144        if (dev->cal.agc_lowest_gain &&
1145            dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
1146            dev->ed_trigger > MT_EDCCA_LEARN_TH) {
1147                dev->ed_monitor_learning = false;
1148                dev->ed_trigger_timeout = jiffies + 20 * HZ;
1149        } else if (!dev->ed_monitor_learning &&
1150                   time_is_after_jiffies(dev->ed_trigger_timeout)) {
1151                dev->ed_monitor_learning = true;
1152                mt76x02_edcca_tx_enable(dev, true);
1153        }
1154
1155        if (dev->ed_monitor_learning)
1156                return;
1157
1158        if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
1159                mt76x02_edcca_tx_enable(dev, false);
1160        else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
1161                mt76x02_edcca_tx_enable(dev, true);
1162}
1163
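/*
 * Periodic MAC housekeeping: update survey and aggregation statistics,
 * check for MAC errors while beaconing is disabled, run the ED CCA
 * check and time out stale TX status entries.
 */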
1164void mt76x02_mac_work(struct work_struct *work)
1165{
1166        struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
1167                                               mphy.mac_work.work);
1168        int i, idx;
1169
1170        mutex_lock(&dev->mt76.mutex);
1171
1172        mt76_update_survey(&dev->mphy);
1173        for (i = 0, idx = 0; i < 16; i++) {
1174                u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
1175
1176                dev->mt76.aggr_stats[idx++] += val & 0xffff;
1177                dev->mt76.aggr_stats[idx++] += val >> 16;
1178        }
1179
1180        if (!dev->mt76.beacon_mask)
1181                mt76x02_check_mac_err(dev);
1182
1183        if (dev->ed_monitor)
1184                mt76x02_edcca_check(dev);
1185
1186        mutex_unlock(&dev->mt76.mutex);
1187
1188        mt76_tx_status_check(&dev->mt76, NULL, false);
1189
1190        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1191                                     MT_MAC_WORK_INTERVAL);
1192}
1193
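/*
 * Restart channel cycle accounting: reset the survey timestamp,
 * reprogram the channel timer configuration and clear the
 * read-and-clear busy/idle counters.
 */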
1194void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
1195{
1196        dev->mphy.survey_time = ktime_get_boottime();
1197
1198        mt76_wr(dev, MT_CH_TIME_CFG,
1199                MT_CH_TIME_CFG_TIMER_EN |
1200                MT_CH_TIME_CFG_TX_AS_BUSY |
1201                MT_CH_TIME_CFG_RX_AS_BUSY |
1202                MT_CH_TIME_CFG_NAV_AS_BUSY |
1203                MT_CH_TIME_CFG_EIFS_AS_BUSY |
1204                MT_CH_CCA_RC_EN |
1205                FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
1206
1207        /* channel cycle counters read-and-clear */
1208        mt76_rr(dev, MT_CH_BUSY);
1209        mt76_rr(dev, MT_CH_IDLE);
1210}
1211EXPORT_SYMBOL_GPL(mt76x02_mac_cc_reset);
1212
1213void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
1214{
1215        idx &= 7;
1216        mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
1217        mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
1218                       get_unaligned_le16(addr + 4));
1219}
1220