/* linux/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h */
   1/*
   2        Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
   3        <http://rt2x00.serialmonkey.com>
   4
   5        This program is free software; you can redistribute it and/or modify
   6        it under the terms of the GNU General Public License as published by
   7        the Free Software Foundation; either version 2 of the License, or
   8        (at your option) any later version.
   9
  10        This program is distributed in the hope that it will be useful,
  11        but WITHOUT ANY WARRANTY; without even the implied warranty of
  12        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13        GNU General Public License for more details.
  14
  15        You should have received a copy of the GNU General Public License
  16        along with this program; if not, see <http://www.gnu.org/licenses/>.
  17 */
  18
/*
	Module: rt2x00
	Abstract: rt2x00 queue data structures and routines
 */
  23
  24#ifndef RT2X00QUEUE_H
  25#define RT2X00QUEUE_H
  26
  27#include <linux/prefetch.h>
  28
/**
 * DOC: Entry frame size
 *
 * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes,
 * for USB devices this restriction does not apply, but the value of
 * 2432 makes sense since it is big enough to contain the maximum fragment
 * size according to the ieee802.11 specs.
 * The aggregation size depends on support from the driver, but should
 * be something around 3840 bytes.
 */
#define DATA_FRAME_SIZE		2432	/* multiple of 128, per PCI requirement above */
#define MGMT_FRAME_SIZE		256	/* management frames are small and fixed-size */
#define AGGREGATION_SIZE	3840	/* upper bound for aggregated (AMPDU) frames */
  42
/**
 * enum data_queue_qid: Queue identification
 *
 * The explicit numeric values up to QID_OTHER are fixed identifiers;
 * QID_BEACON and QID_ATIM deliberately have no specified value and,
 * as noted below, must never be sent to the device.
 *
 * @QID_AC_VO: AC VO queue
 * @QID_AC_VI: AC VI queue
 * @QID_AC_BE: AC BE queue
 * @QID_AC_BK: AC BK queue
 * @QID_HCCA: HCCA queue
 * @QID_MGMT: MGMT queue (prio queue)
 * @QID_RX: RX queue
 * @QID_OTHER: None of the above (don't use, only present for completeness)
 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
 */
enum data_queue_qid {
	QID_AC_VO = 0,
	QID_AC_VI = 1,
	QID_AC_BE = 2,
	QID_AC_BK = 3,
	QID_HCCA = 4,
	QID_MGMT = 13,
	QID_RX = 14,
	QID_OTHER = 15,
	QID_BEACON,
	QID_ATIM,
};
  69
/**
 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
 *
 * These are bit flags stored in the u8 @flags field of
 * &struct skb_frame_desc, so at most 8 flags can ever be defined.
 *
 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
 * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
 *	mac80211 but was stripped for processing by the driver.
 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
 *	don't try to pass it back.
 * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
 *	skb, instead of in the desc field.
 */
enum skb_frame_desc_flags {
	SKBDESC_DMA_MAPPED_RX = 1 << 0,
	SKBDESC_DMA_MAPPED_TX = 1 << 1,
	SKBDESC_IV_STRIPPED = 1 << 2,
	SKBDESC_NOT_MAC80211 = 1 << 3,
	SKBDESC_DESC_IN_SKB = 1 << 4,
};
  89
/**
 * struct skb_frame_desc: Descriptor information for the skb buffer
 *
 * This structure is placed over the driver_data array, this means that
 * this structure should not exceed the size of that array (40 bytes).
 * The size constraint is enforced at compile time by the BUILD_BUG_ON
 * in get_skb_frame_desc().
 *
 * @flags: Frame flags, see &enum skb_frame_desc_flags.
 * @desc_len: Length of the frame descriptor.
 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
 * @tx_rate_flags: the TX rate flags, used for TX status reporting
 * @desc: Pointer to descriptor part of the frame.
 *	Note that this pointer could point to something outside
 *	of the scope of the skb->data pointer.
 * @iv: IV/EIV data used during encryption/decryption.
 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
 * @entry: The entry to which this sk buffer belongs.
 */
struct skb_frame_desc {
	u8 flags;

	u8 desc_len;
	u8 tx_rate_idx;
	u8 tx_rate_flags;

	void *desc;

	__le32 iv[2];

	dma_addr_t skb_dma;

	struct queue_entry *entry;
};
 122
/**
 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
 *
 * The descriptor is overlaid on the driver_data area of the skb's
 * mac80211 TX info; the BUILD_BUG_ON proves at compile time that
 * &struct skb_frame_desc still fits inside that area.
 */
static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
}
 133
/**
 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
 *
 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
 * @RXDONE_SIGNAL_MCS: Signal field contains the mcs value.
 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
 * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary.
 */
enum rxdone_entry_desc_flags {
	RXDONE_SIGNAL_PLCP = BIT(0),
	RXDONE_SIGNAL_BITRATE = BIT(1),
	RXDONE_SIGNAL_MCS = BIT(2),
	RXDONE_MY_BSS = BIT(3),
	RXDONE_CRYPTO_IV = BIT(4),
	RXDONE_CRYPTO_ICV = BIT(5),
	RXDONE_L2PAD = BIT(6),
};

/**
 * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags
 * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags
 * from &rxdone_entry_desc to a signal value type: exactly one of the three
 * RXDONE_SIGNAL_* bits describes how the signal field must be interpreted.
 */
#define RXDONE_SIGNAL_MASK \
	( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS )
 162
/**
 * struct rxdone_entry_desc: RX Entry descriptor
 *
 * Summary of information that has been read from the RX frame descriptor.
 *
 * @timestamp: RX Timestamp
 * @signal: Signal of the received frame. Interpretation (plcp, bitrate or
 *	mcs) depends on the RXDONE_SIGNAL_* bit set in @dev_flags.
 * @rssi: RSSI of the received frame.
 * @size: Data size of the received frame.
 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 * @rate_mode: Rate mode (See @enum rate_modulation).
 * @cipher: Cipher type used during decryption.
 * @cipher_status: Decryption status.
 * @iv: IV/EIV data used during decryption.
 * @icv: ICV data used during decryption.
 */
struct rxdone_entry_desc {
	u64 timestamp;
	int signal;
	int rssi;
	int size;
	int flags;
	int dev_flags;
	u16 rate_mode;
	u8 cipher;
	u8 cipher_status;

	__le32 iv[2];
	__le32 icv;
};
 194
/**
 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
 *
 * Every txdone report has to contain the basic result of the
 * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
 * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
 * conjunction with all of these flags but should only be set
 * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
 * in conjunction with &TXDONE_FAILURE.
 *
 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
 * @TXDONE_SUCCESS: Frame was successfully sent.
 * @TXDONE_FALLBACK: Hardware used fallback rates for retries.
 * @TXDONE_FAILURE: Frame was not successfully sent.
 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
 *	frame transmission failed due to excessive retries.
 * @TXDONE_AMPDU: NOTE(review): presumably marks the status report of a
 *	frame that was transmitted as part of an AMPDU — confirm against
 *	the drivers that set it.
 */
enum txdone_entry_desc_flags {
	TXDONE_UNKNOWN,
	TXDONE_SUCCESS,
	TXDONE_FALLBACK,
	TXDONE_FAILURE,
	TXDONE_EXCESSIVE_RETRY,
	TXDONE_AMPDU,
};
 220
/**
 * struct txdone_entry_desc: TX done entry descriptor
 *
 * Summary of information that has been read from the TX frame descriptor
 * after the device is done with transmission.
 *
 * @flags: TX done flags; bit numbers from &enum txdone_entry_desc_flags.
 * @retry: Retry count.
 */
struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};
 234
/**
 * enum txentry_desc_flags: Status flags for TX entry descriptor
 *
 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
 * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter.
 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first fragment of the frame.
 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
 * @ENTRY_TXD_ACK: An ACK is required for this frame.
 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
 * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
 */
enum txentry_desc_flags {
	ENTRY_TXD_RTS_FRAME,
	ENTRY_TXD_CTS_FRAME,
	ENTRY_TXD_GENERATE_SEQ,
	ENTRY_TXD_FIRST_FRAGMENT,
	ENTRY_TXD_MORE_FRAG,
	ENTRY_TXD_REQ_TIMESTAMP,
	ENTRY_TXD_BURST,
	ENTRY_TXD_ACK,
	ENTRY_TXD_RETRY_MODE,
	ENTRY_TXD_ENCRYPT,
	ENTRY_TXD_ENCRYPT_PAIRWISE,
	ENTRY_TXD_ENCRYPT_IV,
	ENTRY_TXD_ENCRYPT_MMIC,
	ENTRY_TXD_HT_AMPDU,
	ENTRY_TXD_HT_BW_40,
	ENTRY_TXD_HT_SHORT_GI,
	ENTRY_TXD_HT_MIMO_PS,
};
 275
/**
 * struct txentry_desc: TX Entry descriptor
 *
 * Summary of information for the frame descriptor before sending a TX frame.
 *
 * @flags: Descriptor flags; bit numbers from &enum txentry_desc_flags.
 * @length: Length of the entire frame.
 * @header_length: Length of 802.11 header.
 * @u: Rate-mode specific parameters: legacy PLCP fields or HT fields,
 *	selected by @rate_mode.
 * @u.plcp.length_high: PLCP length high word.
 * @u.plcp.length_low: PLCP length low word.
 * @u.plcp.signal: PLCP signal.
 * @u.plcp.service: PLCP service.
 * @u.plcp.ifs: IFS value.
 * @u.ht.mcs: MCS.
 * @u.ht.stbc: Use Space Time Block Coding (only available for MCS rates < 8).
 * @u.ht.ba_size: Size of the recipients RX reorder buffer - 1.
 * @u.ht.mpdu_density: MPDU density.
 * @u.ht.txop: IFS value for 11n capable chips.
 * @u.ht.wcid: NOTE(review): presumably the hardware wireless client ID
 *	(station table index) — confirm against the drivers.
 * @rate_mode: Rate mode (See @enum rate_modulation).
 * @retry_limit: Max number of retries.
 * @cipher: Cipher type used for encryption.
 * @key_idx: Key index used for encryption.
 * @iv_offset: Position where IV should be inserted by hardware.
 * @iv_len: Length of IV data.
 */
struct txentry_desc {
	unsigned long flags;

	u16 length;
	u16 header_length;

	union {
		struct {
			u16 length_high;
			u16 length_low;
			u16 signal;
			u16 service;
			enum ifs ifs;
		} plcp;

		struct {
			u16 mcs;
			u8 stbc;
			u8 ba_size;
			u8 mpdu_density;
			enum txop txop;
			int wcid;
		} ht;
	} u;

	enum rate_modulation rate_mode;

	short retry_limit;

	enum cipher cipher;
	u16 key_idx;
	u16 iv_offset;
	u16 iv_len;
};
 335
/**
 * enum queue_entry_flags: Status flags for queue entry
 *
 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
 *	As long as this bit is set, this entry may only be touched
 *	through the interface structure.
 * @ENTRY_BCN_ENABLED: NOTE(review): presumably set while beaconing is
 *	enabled for this beacon entry — confirm against the drivers.
 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
 *	transfer (either TX or RX depending on the queue). The entry should
 *	only be touched after the device has signaled it is done with it.
 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
 *	for the signal to start sending.
 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 *	while transferring the data to the hardware. No TX status report will
 *	be expected from the hardware.
 * @ENTRY_DATA_STATUS_PENDING: The entry has been send to the device and
 *	returned. It is now waiting for the status reporting before the
 *	entry can be reused again.
 * @ENTRY_DATA_STATUS_SET: NOTE(review): appears to indicate that the
 *	entry's status field holds a valid device status — confirm against
 *	the drivers that set it.
 */
enum queue_entry_flags {
	ENTRY_BCN_ASSIGNED,
	ENTRY_BCN_ENABLED,
	ENTRY_OWNER_DEVICE_DATA,
	ENTRY_DATA_PENDING,
	ENTRY_DATA_IO_FAILED,
	ENTRY_DATA_STATUS_PENDING,
	ENTRY_DATA_STATUS_SET,
};
 363
/**
 * struct queue_entry: Entry inside the &struct data_queue
 *
 * @flags: Entry flags, see &enum queue_entry_flags.
 * @last_action: Timestamp (jiffies) of last change; used by
 *	rt2x00queue_dma_timeout() to detect stuck DMA transfers.
 * @queue: The data queue (&struct data_queue) to which this entry belongs.
 * @skb: The buffer which is currently being transmitted (for TX queue),
 *	or used to directly receive data in (for RX queue).
 * @entry_idx: The entry index number.
 * @status: Device specific status.
 * @priv_data: Private data belonging to this queue entry. The pointer
 *	points to data specific to a particular driver and queue type.
 */
struct queue_entry {
	unsigned long flags;
	unsigned long last_action;

	struct data_queue *queue;

	struct sk_buff *skb;

	unsigned int entry_idx;

	u32 status;

	void *priv_data;
};
 391
/**
 * enum queue_index: Queue index type
 *
 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
 *	owned by the hardware then the queue is considered to be full.
 * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
 *	transferred to the hardware.
 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
 *	the hardware and for which we need to run the txdone handler. If this
 *	entry is not owned by the hardware the queue is considered to be empty.
 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
 *	of the index array.
 */
enum queue_index {
	Q_INDEX,
	Q_INDEX_DMA_DONE,
	Q_INDEX_DONE,
	Q_INDEX_MAX,
};
 411
/**
 * enum data_queue_flags: Status flags for data queues
 *
 * @QUEUE_STARTED: The queue has been started. For RX queues this means the
 *	device might be DMA'ing skbuffers. TX queues will accept skbuffers to
 *	be transmitted and beacon queues will start beaconing the configured
 *	beacons.
 * @QUEUE_PAUSED: The queue has been started but is currently paused.
 *	When this bit is set, the queue has been stopped in mac80211,
 *	preventing new frames to be enqueued. However, a few frames
 *	might still appear shortly after the pausing.
 */
enum data_queue_flags {
	QUEUE_STARTED,
	QUEUE_PAUSED,
};
 428
/**
 * struct data_queue: Data queue
 *
 * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to.
 * @entries: Base address of the &struct queue_entry which are
 *	part of this queue.
 * @qid: The queue identification, see &enum data_queue_qid.
 * @flags: Queue flags, see &enum data_queue_flags.
 * @status_lock: The mutex for protecting the start/stop/flush
 *	handling on this queue.
 * @tx_lock: Spinlock to serialize tx operations on this queue.
 * @index_lock: Spinlock to protect index handling. Whenever any of the
 *	@index positions needs to be changed this lock should be grabbed
 *	to prevent index corruption due to concurrency.
 * @count: Number of frames handled in the queue.
 * @limit: Maximum number of entries in the queue.
 * @threshold: Minimum number of free entries before queue is kicked by force.
 * @length: Number of frames in queue.
 * @index: Index pointers to entry positions in the queue,
 *	use &enum queue_index to get a specific index field.
 * @txop: maximum burst time.
 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
 * @data_size: Maximum data size for the frames in this queue.
 * @desc_size: Hardware descriptor size for the data in this queue.
 * @winfo_size: NOTE(review): presumably the size of the hardware
 *	TXWI/RXWI (wireless information) part of the descriptor — confirm
 *	against the drivers.
 * @priv_size: Size of per-queue_entry private data.
 * @usb_endpoint: Device endpoint used for communication (USB only)
 * @usb_maxpacket: Max packet size for given endpoint (USB only)
 */
struct data_queue {
	struct rt2x00_dev *rt2x00dev;
	struct queue_entry *entries;

	enum data_queue_qid qid;
	unsigned long flags;

	struct mutex status_lock;
	spinlock_t tx_lock;
	spinlock_t index_lock;

	unsigned int count;
	unsigned short limit;
	unsigned short threshold;
	unsigned short length;
	unsigned short index[Q_INDEX_MAX];

	unsigned short txop;
	unsigned short aifs;
	unsigned short cw_min;
	unsigned short cw_max;

	unsigned short data_size;
	unsigned char  desc_size;
	unsigned char  winfo_size;
	unsigned short priv_size;

	unsigned short usb_endpoint;
	unsigned short usb_maxpacket;
};
 489
 490/**
 491 * queue_end - Return pointer to the last queue (HELPER MACRO).
 492 * @__dev: Pointer to &struct rt2x00_dev
 493 *
 494 * Using the base rx pointer and the maximum number of available queues,
 495 * this macro will return the address of 1 position beyond  the end of the
 496 * queues array.
 497 */
 498#define queue_end(__dev) \
 499        &(__dev)->rx[(__dev)->data_queues]
 500
 501/**
 502 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
 503 * @__dev: Pointer to &struct rt2x00_dev
 504 *
 505 * Using the base tx pointer and the maximum number of available TX
 506 * queues, this macro will return the address of 1 position beyond
 507 * the end of the TX queue array.
 508 */
 509#define tx_queue_end(__dev) \
 510        &(__dev)->tx[(__dev)->ops->tx_queues]
 511
 512/**
 513 * queue_next - Return pointer to next queue in list (HELPER MACRO).
 514 * @__queue: Current queue for which we need the next queue
 515 *
 516 * Using the current queue address we take the address directly
 517 * after the queue to take the next queue. Note that this macro
 518 * should be used carefully since it does not protect against
 519 * moving past the end of the list. (See macros &queue_end and
 520 * &tx_queue_end for determining the end of the queue).
 521 */
 522#define queue_next(__queue) \
 523        &(__queue)[1]
 524
/**
 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
 * @__entry: Pointer where the current queue entry will be stored in.
 * @__start: Start queue pointer.
 * @__end: End queue pointer.
 *
 * This macro will loop through all queues between &__start and &__end.
 * The comma operator in the loop condition prefetches the queue after
 * the current one before the termination test is evaluated, so the
 * next iteration's data is already on its way into the cache.
 */
#define queue_loop(__entry, __start, __end)			\
	for ((__entry) = (__start);				\
	     prefetch(queue_next(__entry)), (__entry) != (__end);\
	     (__entry) = queue_next(__entry))

/**
 * queue_for_each - Loop through all queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all available queues, starting at the
 * base rx pointer and ending one past the last queue.
 */
#define queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->rx, queue_end(__dev))

/**
 * tx_queue_for_each - Loop through the TX queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues excluding
 * the Beacon and Atim queues.
 */
#define tx_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))

/**
 * txall_queue_for_each - Loop through all TX related queues
 * @__dev: Pointer to &struct rt2x00_dev
 * @__entry: Pointer where the current queue entry will be stored in.
 *
 * This macro will loop through all TX related queues including
 * the Beacon and Atim queues.
 */
#define txall_queue_for_each(__dev, __entry) \
	queue_loop(__entry, (__dev)->tx, queue_end(__dev))
 569
/**
 * rt2x00queue_for_each_entry - Loop through all entries in the queue
 * @queue: Pointer to @data_queue
 * @start: &enum queue_index Pointer to start index
 * @end: &enum queue_index Pointer to end index
 * @data: Data to pass to the callback function
 * @fn: The function to call for each &struct queue_entry
 *
 * This will walk through all entries in the queue, in chronological
 * order. This means it will start at the current @start pointer
 * and will walk through the queue until it reaches the @end pointer.
 *
 * If fn returns true for an entry rt2x00queue_for_each_entry will stop
 * processing and return true as well.
 *
 * Return: true if @fn returned true for one of the visited entries,
 * false otherwise.
 */
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data));
 591
 592/**
 593 * rt2x00queue_empty - Check if the queue is empty.
 594 * @queue: Queue to check if empty.
 595 */
 596static inline int rt2x00queue_empty(struct data_queue *queue)
 597{
 598        return queue->length == 0;
 599}
 600
 601/**
 602 * rt2x00queue_full - Check if the queue is full.
 603 * @queue: Queue to check if full.
 604 */
 605static inline int rt2x00queue_full(struct data_queue *queue)
 606{
 607        return queue->length == queue->limit;
 608}
 609
 610/**
 611 * rt2x00queue_free - Check the number of available entries in queue.
 612 * @queue: Queue to check.
 613 */
 614static inline int rt2x00queue_available(struct data_queue *queue)
 615{
 616        return queue->limit - queue->length;
 617}
 618
 619/**
 620 * rt2x00queue_threshold - Check if the queue is below threshold
 621 * @queue: Queue to check.
 622 */
 623static inline int rt2x00queue_threshold(struct data_queue *queue)
 624{
 625        return rt2x00queue_available(queue) < queue->threshold;
 626}
 627/**
 628 * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
 629 * @entry: Queue entry to check.
 630 */
 631static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
 632{
 633        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 634                return false;
 635        return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
 636}
 637
 638/**
 639 * _rt2x00_desc_read - Read a word from the hardware descriptor.
 640 * @desc: Base descriptor address
 641 * @word: Word index from where the descriptor should be read.
 642 * @value: Address where the descriptor value should be written into.
 643 */
 644static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
 645{
 646        *value = desc[word];
 647}
 648
 649/**
 650 * rt2x00_desc_read - Read a word from the hardware descriptor, this
 651 * function will take care of the byte ordering.
 652 * @desc: Base descriptor address
 653 * @word: Word index from where the descriptor should be read.
 654 * @value: Address where the descriptor value should be written into.
 655 */
 656static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
 657{
 658        __le32 tmp;
 659        _rt2x00_desc_read(desc, word, &tmp);
 660        *value = le32_to_cpu(tmp);
 661}
 662
 663/**
 664 * rt2x00_desc_write - write a word to the hardware descriptor, this
 665 * function will take care of the byte ordering.
 666 * @desc: Base descriptor address
 667 * @word: Word index from where the descriptor should be written.
 668 * @value: Value that should be written into the descriptor.
 669 */
 670static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
 671{
 672        desc[word] = value;
 673}
 674
 675/**
 676 * rt2x00_desc_write - write a word to the hardware descriptor.
 677 * @desc: Base descriptor address
 678 * @word: Word index from where the descriptor should be written.
 679 * @value: Value that should be written into the descriptor.
 680 */
 681static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
 682{
 683        _rt2x00_desc_write(desc, word, cpu_to_le32(value));
 684}
 685
 686#endif /* RT2X00QUEUE_H */
 687