linux/drivers/net/wireless/ath/ath9k/mac.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

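/*
 * Program the per-queue TX interrupt masks cached in ath_hw into the
 * secondary interrupt mask registers (AR_IMR_S0/S1/S2).
 */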
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
                                        struct ath9k_tx_queue_info *qi)
{
        DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
                "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
                ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
                ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
                ah->txurn_interrupt_mask);

        REG_WRITE(ah, AR_IMR_S0,
                  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
                  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
        REG_WRITE(ah, AR_IMR_S1,
                  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
                  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
        REG_RMW_FIELD(ah, AR_IMR_S2,
                      AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
        return REG_READ(ah, AR_QTXDP(q));
}

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
        REG_WRITE(ah, AR_QTXDP(q), txdp);
}

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
        DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Enable TXE on queue: %u\n", q);
        REG_WRITE(ah, AR_Q_TXE, 1 << q);
}

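/*
 * Return the number of frames pending on queue 'q'.  A queue whose
 * pending-frame counter reads zero is still reported as having one
 * pending frame if its TXE bit is set.
 */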
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
        u32 npend;

        npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
        if (npend == 0) {
                if (REG_READ(ah, AR_Q_TXE) & (1 << q))
                        npend = 1;
        }

        return npend;
}

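/*
 * Raise (bIncTrigLevel == true) or lower the TX FIFO trigger level by
 * one step, with interrupts masked around the AR_TXCFG update.
 * Returns true if the level actually changed.
 */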
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
        u32 txcfg, curLevel, newLevel;
        enum ath9k_int omask;

        if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD)
                return false;

        omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);

        txcfg = REG_READ(ah, AR_TXCFG);
        curLevel = MS(txcfg, AR_FTRIG);
        newLevel = curLevel;
        if (bIncTrigLevel) {
                if (curLevel < MAX_TX_FIFO_THRESHOLD)
                        newLevel++;
        } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
                newLevel--;
        if (newLevel != curLevel)
                REG_WRITE(ah, AR_TXCFG,
                          (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

        ath9k_hw_set_interrupts(ah, omask);

        ah->tx_trig_level = newLevel;

        return newLevel != curLevel;
}

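/*
 * Stop TX DMA on queue 'q': disable the queue and poll for the pending
 * frame count to reach zero.  If frames remain, schedule a brief quiet
 * period and force the channel-idle diagnostic bit to kill the frame
 * currently on air, then poll again.  Returns false on timeout.
 */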
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT       4000    /* usec */
#define ATH9K_TIME_QUANTUM              100     /* usec */

        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;
        u32 tsfLow, j, wait;
        u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

        if (q >= pCap->total_queues) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, "
                        "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, "
                        "inactive queue: %u\n", q);
                return false;
        }

        REG_WRITE(ah, AR_Q_TXD, 1 << q);

        for (wait = wait_time; wait != 0; wait--) {
                if (ath9k_hw_numtxpending(ah, q) == 0)
                        break;
                udelay(ATH9K_TIME_QUANTUM);
        }

        if (ath9k_hw_numtxpending(ah, q)) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
                        "%s: Num of pending TX Frames %d on Q %d\n",
                        __func__, ath9k_hw_numtxpending(ah, q), q);

                for (j = 0; j < 2; j++) {
                        tsfLow = REG_READ(ah, AR_TSF_L32);
                        REG_WRITE(ah, AR_QUIET2,
                                  SM(10, AR_QUIET2_QUIET_DUR));
                        REG_WRITE(ah, AR_QUIET_PERIOD, 100);
                        REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
                        REG_SET_BIT(ah, AR_TIMER_MODE,
                                    AR_QUIET_TIMER_EN);

                        if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
                                break;

                        DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
                                "TSF has moved while trying to set "
                                "quiet time TSF: 0x%08x\n", tsfLow);
                }

                REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

                udelay(200);
                REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

                wait = wait_time;
                while (ath9k_hw_numtxpending(ah, q)) {
                        if ((--wait) == 0) {
                                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
                                        "Failed to stop TX DMA in %d msec "
                                        "after killing last frame\n",
                                        ATH9K_TX_STOP_DMA_TIMEOUT / 1000);
                                break;
                        }
                        udelay(ATH9K_TIME_QUANTUM);
                }

                REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
        }

        REG_WRITE(ah, AR_Q_TXD, 0);
        return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}

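/*
 * Fill in the buffer-length and linking fields of a TX descriptor for
 * one segment of a (possibly multi-segment) frame and clear its status
 * words.  ds0 is the first descriptor of the frame, from which the
 * rate/tries fields are copied into the last segment.
 */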
void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
                         u32 segLen, bool firstSeg,
                         bool lastSeg, const struct ath_desc *ds0)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        if (firstSeg) {
                ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
        } else if (lastSeg) {
                ads->ds_ctl0 = 0;
                ads->ds_ctl1 = segLen;
                ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
                ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
        } else {
                ads->ds_ctl0 = 0;
                ads->ds_ctl1 = segLen | AR_TxMore;
                ads->ds_ctl2 = 0;
                ads->ds_ctl3 = 0;
        }
        ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
        ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
        ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
        ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
        ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}

void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
        ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
        ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
        ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
        ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}

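/*
 * Process a completed TX descriptor: translate the hardware status
 * words into ds->ds_txstat.  Returns -EINPROGRESS if the descriptor
 * has not been marked done by the hardware yet.
 */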
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        if ((ads->ds_txstatus9 & AR_TxDone) == 0)
                return -EINPROGRESS;

        ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
        ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
        ds->ds_txstat.ts_status = 0;
        ds->ds_txstat.ts_flags = 0;

        if (ads->ds_txstatus1 & AR_ExcessiveRetries)
                ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
        if (ads->ds_txstatus1 & AR_Filtered)
                ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
        if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
                ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
        if (ads->ds_txstatus9 & AR_TxOpExceeded)
                ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
        if (ads->ds_txstatus1 & AR_TxTimerExpired)
                ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

        if (ads->ds_txstatus1 & AR_DescCfgErr)
                ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
        if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
                ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
        if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
                ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
        if (ads->ds_txstatus0 & AR_TxBaStatus) {
                ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
                ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
                ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
        }

        ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
        switch (ds->ds_txstat.ts_rateindex) {
        case 0:
                ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
                break;
        case 1:
                ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
                break;
        case 2:
                ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
                break;
        case 3:
                ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
                break;
        }

        ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
        ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
        ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
        ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
        ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
        ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
        ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
        ds->ds_txstat.evm0 = ads->AR_TxEVM0;
        ds->ds_txstat.evm1 = ads->AR_TxEVM1;
        ds->ds_txstat.evm2 = ads->AR_TxEVM2;
        ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
        ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
        ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
        ds->ds_txstat.ts_antenna = 0;

        return 0;
}

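/*
 * Set up the first control words of a TX descriptor: frame length,
 * TX power (clamped to the maximum field value), key index, frame
 * type, encryption type and the assorted ATH9K_TXDESC_* flags.
 */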
void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
                            u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
                            u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        txPower += ah->txpower_indexoffset;
        if (txPower > 63)
                txPower = 63;

        ads->ds_ctl0 = (pktLen & AR_FrameLen)
                | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
                | SM(txPower, AR_XmitPower)
                | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
                | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
                | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
                | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

        ads->ds_ctl1 =
                (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
                | SM(type, AR_FrameType)
                | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
                | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
                | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

        ads->ds_ctl6 = SM(keyType, AR_EncrType);

        if (AR_SREV_9285(ah)) {
                ads->ds_ctl8 = 0;
                ads->ds_ctl9 = 0;
                ads->ds_ctl10 = 0;
                ads->ds_ctl11 = 0;
        }
}

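/*
 * Program the multi-rate-retry series (up to four rate/tries pairs),
 * RTS/CTS selection and per-series packet durations into a frame's
 * descriptor.  The rate fields are mirrored into the last descriptor
 * of the frame.
 */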
void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
                                  struct ath_desc *lastds,
                                  u32 durUpdateEn, u32 rtsctsRate,
                                  u32 rtsctsDuration,
                                  struct ath9k_11n_rate_series series[],
                                  u32 nseries, u32 flags)
{
        struct ar5416_desc *ads = AR5416DESC(ds);
        struct ar5416_desc *last_ads = AR5416DESC(lastds);
        u32 ds_ctl0;

        if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
                ds_ctl0 = ads->ds_ctl0;

                if (flags & ATH9K_TXDESC_RTSENA) {
                        ds_ctl0 &= ~AR_CTSEnable;
                        ds_ctl0 |= AR_RTSEnable;
                } else {
                        ds_ctl0 &= ~AR_RTSEnable;
                        ds_ctl0 |= AR_CTSEnable;
                }

                ads->ds_ctl0 = ds_ctl0;
        } else {
                ads->ds_ctl0 =
                        (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
        }

        ads->ds_ctl2 = set11nTries(series, 0)
                | set11nTries(series, 1)
                | set11nTries(series, 2)
                | set11nTries(series, 3)
                | (durUpdateEn ? AR_DurUpdateEna : 0)
                | SM(0, AR_BurstDur);

        ads->ds_ctl3 = set11nRate(series, 0)
                | set11nRate(series, 1)
                | set11nRate(series, 2)
                | set11nRate(series, 3);

        ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
                | set11nPktDurRTSCTS(series, 1);

        ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
                | set11nPktDurRTSCTS(series, 3);

        ads->ds_ctl7 = set11nRateFlags(series, 0)
                | set11nRateFlags(series, 1)
                | set11nRateFlags(series, 2)
                | set11nRateFlags(series, 3)
                | SM(rtsctsRate, AR_RTSCTSRate);
        last_ads->ds_ctl2 = ads->ds_ctl2;
        last_ads->ds_ctl3 = ads->ds_ctl3;
}

void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
                                u32 aggrLen)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
        ads->ds_ctl6 &= ~AR_AggrLen;
        ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}

void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
                                 u32 numDelims)
{
        struct ar5416_desc *ads = AR5416DESC(ds);
        unsigned int ctl6;

        ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

        ctl6 = ads->ds_ctl6;
        ctl6 &= ~AR_PadDelim;
        ctl6 |= SM(numDelims, AR_PadDelim);
        ads->ds_ctl6 = ctl6;
}

void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_ctl1 |= AR_IsAggr;
        ads->ds_ctl1 &= ~AR_MoreAggr;
        ads->ds_ctl6 &= ~AR_PadDelim;
}

void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
}

void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
                                   u32 burstDuration)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_ctl2 &= ~AR_BurstDur;
        ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}

void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
                                     u32 vmf)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        if (vmf)
                ads->ds_ctl0 |= AR_VirtMoreFrag;
        else
                ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
        *txqs &= ah->intr_txqs;
        ah->intr_txqs &= ~(*txqs);
}

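/*
 * Validate and cache user-supplied queue parameters (AIFS, CW min/max,
 * retry limits, burst/ready times) in ah->txq[q].  The hardware is not
 * touched here; ath9k_hw_resettxqueue() pushes the values to the chip.
 */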
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
                            const struct ath9k_tx_queue_info *qinfo)
{
        u32 cw;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;

        if (q >= pCap->total_queues) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, "
                        "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, "
                        "inactive queue: %u\n", q);
                return false;
        }

        DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

        qi->tqi_ver = qinfo->tqi_ver;
        qi->tqi_subtype = qinfo->tqi_subtype;
        qi->tqi_qflags = qinfo->tqi_qflags;
        qi->tqi_priority = qinfo->tqi_priority;
        if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
                qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
        else
                qi->tqi_aifs = INIT_AIFS;
        if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmin, 1024U);
                qi->tqi_cwmin = 1;
                while (qi->tqi_cwmin < cw)
                        qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
        } else
                qi->tqi_cwmin = qinfo->tqi_cwmin;
        if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmax, 1024U);
                qi->tqi_cwmax = 1;
                while (qi->tqi_cwmax < cw)
                        qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
        } else
                qi->tqi_cwmax = INIT_CWMAX;

        if (qinfo->tqi_shretry != 0)
                qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
        else
                qi->tqi_shretry = INIT_SH_RETRY;
        if (qinfo->tqi_lgretry != 0)
                qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
        else
                qi->tqi_lgretry = INIT_LG_RETRY;
        qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
        qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
        qi->tqi_burstTime = qinfo->tqi_burstTime;
        qi->tqi_readyTime = qinfo->tqi_readyTime;

        switch (qinfo->tqi_subtype) {
        case ATH9K_WME_UPSD:
                if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
                        qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
                break;
        default:
                break;
        }

        return true;
}

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
                            struct ath9k_tx_queue_info *qinfo)
{
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;

        if (q >= pCap->total_queues) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, "
                        "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, "
                        "inactive queue: %u\n", q);
                return false;
        }

        qinfo->tqi_ver = qi->tqi_ver;
        qinfo->tqi_subtype = qi->tqi_subtype;
        qinfo->tqi_qflags = qi->tqi_qflags;
        qinfo->tqi_priority = qi->tqi_priority;
        qinfo->tqi_aifs = qi->tqi_aifs;
        qinfo->tqi_cwmin = qi->tqi_cwmin;
        qinfo->tqi_cwmax = qi->tqi_cwmax;
        qinfo->tqi_shretry = qi->tqi_shretry;
        qinfo->tqi_lgretry = qi->tqi_lgretry;
        qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
        qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
        qinfo->tqi_burstTime = qi->tqi_burstTime;
        qinfo->tqi_readyTime = qi->tqi_readyTime;

        return true;
}

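/*
 * Allocate a hardware TX queue of the requested type.  Fixed queue
 * numbers are used for beacon, CAB, UAPSD and PS-poll traffic, while
 * data queues take the first inactive slot.  Returns the queue number
 * or -1 on failure.
 */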
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
                          const struct ath9k_tx_queue_info *qinfo)
{
        struct ath9k_tx_queue_info *qi;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        int q;

        switch (type) {
        case ATH9K_TX_QUEUE_BEACON:
                q = pCap->total_queues - 1;
                break;
        case ATH9K_TX_QUEUE_CAB:
                q = pCap->total_queues - 2;
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                q = 1;
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                q = pCap->total_queues - 3;
                break;
        case ATH9K_TX_QUEUE_DATA:
                for (q = 0; q < pCap->total_queues; q++)
                        if (ah->txq[q].tqi_type ==
                            ATH9K_TX_QUEUE_INACTIVE)
                                break;
                if (q == pCap->total_queues) {
                        DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
                                "No available TX queue\n");
                        return -1;
                }
                break;
        default:
                DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Invalid TX queue type: %u\n",
                        type);
                return -1;
        }

        DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

        qi = &ah->txq[q];
        if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
                DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
                        "TX queue: %u already active\n", q);
                return -1;
        }
        memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
        qi->tqi_type = type;
        if (qinfo == NULL) {
                qi->tqi_qflags =
                        TXQ_FLAG_TXOKINT_ENABLE
                        | TXQ_FLAG_TXERRINT_ENABLE
                        | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
                qi->tqi_aifs = INIT_AIFS;
                qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
                qi->tqi_cwmax = INIT_CWMAX;
                qi->tqi_shretry = INIT_SH_RETRY;
                qi->tqi_lgretry = INIT_LG_RETRY;
                qi->tqi_physCompBuf = 0;
        } else {
                qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
                (void) ath9k_hw_set_txq_props(ah, q, qinfo);
        }

        return q;
}

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;

        if (q >= pCap->total_queues) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, "
                        "invalid queue: %u\n", q);
                return false;
        }
        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, "
                        "inactive queue: %u\n", q);
                return false;
        }

        DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

        qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
        ah->txok_interrupt_mask &= ~(1 << q);
        ah->txerr_interrupt_mask &= ~(1 << q);
        ah->txdesc_interrupt_mask &= ~(1 << q);
        ah->txeol_interrupt_mask &= ~(1 << q);
        ah->txurn_interrupt_mask &= ~(1 << q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}

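/*
 * Push the parameters cached for queue 'q' (see
 * ath9k_hw_set_txq_props) into the QCU/DCU registers and update the
 * per-queue interrupt masks.  Queue-type specific settings (beacon,
 * CAB, PS-poll, UAPSD) are applied at the end.
 */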
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_channel *chan = ah->curchan;
        struct ath9k_tx_queue_info *qi;
        u32 cwMin, chanCwMin, value;

        if (q >= pCap->total_queues) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, "
                        "invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, "
                        "inactive queue: %u\n", q);
                return true;
        }

        DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

        if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
                if (chan && IS_CHAN_B(chan))
                        chanCwMin = INIT_CWMIN_11B;
                else
                        chanCwMin = INIT_CWMIN;

                for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
        } else
                cwMin = qi->tqi_cwmin;

        REG_WRITE(ah, AR_DLCL_IFS(q),
                  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
                  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
                  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

        REG_WRITE(ah, AR_DRETRY_LIMIT(q),
                  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
                  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
                  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

        REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
        REG_WRITE(ah, AR_DMISC(q),
                  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

        if (qi->tqi_cbrPeriod) {
                REG_WRITE(ah, AR_QCBRCFG(q),
                          SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
                          SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
                          (qi->tqi_cbrOverflowLimit ?
                           AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
        }
        if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
                          AR_Q_RDYTIMECFG_EN);
        }

        REG_WRITE(ah, AR_DCHNTIME(q),
                  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
                  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

        if (qi->tqi_burstTime
            && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) |
                          AR_Q_MISC_RDYTIME_EXP_POLICY);
        }

        if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
        }
        if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_FRAG_BKOFF_EN);
        }
        switch (qi->tqi_type) {
        case ATH9K_TX_QUEUE_BEACON:
                REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
                          | AR_Q_MISC_FSP_DBA_GATED
                          | AR_Q_MISC_BEACON_USE
                          | AR_Q_MISC_CBR_INCR_DIS1);

                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
                          | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
                          | AR_D_MISC_BEACON_USE
                          | AR_D_MISC_POST_FR_BKOFF_DIS);
                break;
        case ATH9K_TX_QUEUE_CAB:
                REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
                          | AR_Q_MISC_FSP_DBA_GATED
                          | AR_Q_MISC_CBR_INCR_DIS1
                          | AR_Q_MISC_CBR_INCR_DIS0);
                value = (qi->tqi_readyTime -
                         (ah->config.sw_beacon_response_time -
                          ah->config.dma_beacon_response_time) -
                         ah->config.additional_swba_backoff) * 1024;
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          value | AR_Q_RDYTIMECFG_EN);
                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
                          | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
                break;
        default:
                break;
        }

        if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
                             AR_D_MISC_ARB_LOCKOUT_CNTRL) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
        }

        if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
                ah->txok_interrupt_mask |= 1 << q;
        else
                ah->txok_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
                ah->txerr_interrupt_mask |= 1 << q;
        else
                ah->txerr_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
                ah->txdesc_interrupt_mask |= 1 << q;
        else
                ah->txdesc_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
                ah->txeol_interrupt_mask |= 1 << q;
        else
                ah->txeol_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
                ah->txurn_interrupt_mask |= 1 << q;
        else
                ah->txurn_interrupt_mask &= ~(1 << q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}

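/*
 * Process a completed RX descriptor: translate the hardware status
 * words into ds->ds_rxstat (length, timestamp, RSSI, rate, aggregation
 * and error flags).  Returns -EINPROGRESS if the descriptor is not
 * done yet.
 */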
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                        u32 pa, struct ath_desc *nds, u64 tsf)
{
        struct ar5416_desc ads;
        struct ar5416_desc *adsp = AR5416DESC(ds);
        u32 phyerr;

        if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
                return -EINPROGRESS;

        ads.u.rx = adsp->u.rx;

        ds->ds_rxstat.rs_status = 0;
        ds->ds_rxstat.rs_flags = 0;

        ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
        ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
                ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
                ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
        } else {
                ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
                ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt00);
                ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt01);
                ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
                                                AR_RxRSSIAnt02);
                ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt10);
                ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt11);
                ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
                                                AR_RxRSSIAnt12);
        }
        if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
                ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
        else
                ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

        ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
        ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

        ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
        ds->ds_rxstat.rs_moreaggr =
                (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
        ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
        ds->ds_rxstat.rs_flags =
                (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
        ds->ds_rxstat.rs_flags |=
                (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

        if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
                ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
                ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
        if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
                ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

        if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
                if (ads.ds_rxstatus8 & AR_CRCErr)
                        ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
                else if (ads.ds_rxstatus8 & AR_PHYErr) {
                        ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
                        phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
                        ds->ds_rxstat.rs_phyerr = phyerr;
                } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
                        ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
                else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
        }

        return 0;
}

void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
                          u32 size, u32 flags)
{
        struct ar5416_desc *ads = AR5416DESC(ds);
        struct ath9k_hw_capabilities *pCap = &ah->caps;

        ads->ds_ctl1 = size & AR_BufLen;
        if (flags & ATH9K_RXDESC_INTREQ)
                ads->ds_ctl1 |= AR_RxIntrReq;

        ads->ds_rxstatus8 &= ~AR_RxDone;
        if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
                memset(&(ads->u), 0, sizeof(ads->u));
}

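/*
 * Abort (set == true) or re-enable frame reception in the PCU.  When
 * aborting, wait for the RX state machine to go idle and back out the
 * abort bits if it does not.
 */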
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
        u32 reg;

        if (set) {
                REG_SET_BIT(ah, AR_DIAG_SW,
                            (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

                if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
                                   0, AH_WAIT_TIMEOUT)) {
                        REG_CLR_BIT(ah, AR_DIAG_SW,
                                    (AR_DIAG_RX_DIS |
                                     AR_DIAG_RX_ABORT));

                        reg = REG_READ(ah, AR_OBS_BUS_1);
                        DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
                                "RX failed to go idle in 10 ms RXSM=0x%x\n", reg);

                        return false;
                }
        } else {
                REG_CLR_BIT(ah, AR_DIAG_SW,
                            (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
        }

        return true;
}

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
        REG_WRITE(ah, AR_RXDP, rxdp);
}

void ath9k_hw_rxena(struct ath_hw *ah)
{
        REG_WRITE(ah, AR_CR, AR_CR_RXE);
}

void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
        ath9k_enable_mib_counters(ah);

        ath9k_ani_reset(ah);

        REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}

void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
        REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

        ath9k_hw_disable_mib_counters(ah);
}

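/*
 * Stop the RX DMA engine and poll until the receive-enable bit clears.
 * Returns false if DMA is still active when the timeout expires.
 */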
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
#define AH_RX_TIME_QUANTUM     100     /* usec */

        int i;

        REG_WRITE(ah, AR_CR, AR_CR_RXD);

        /* Wait for rx enable bit to go low */
        for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
                if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
                        break;
                udelay(AH_RX_TIME_QUANTUM);
        }

        if (i == 0) {
                DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
                        "DMA failed to stop in %d ms "
                        "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
                        AH_RX_STOP_DMA_TIMEOUT / 1000,
                        REG_READ(ah, AR_CR),
                        REG_READ(ah, AR_DIAG_SW));
                return false;
        } else {
                return true;
        }

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}