1
2
3
4
5#ifndef __iwl_trans_queue_tx_h__
6#define __iwl_trans_queue_tx_h__
7#include "iwl-fh.h"
8#include "fw/api/tx.h"
9
/*
 * struct iwl_tso_hdr_page - scratch page used to duplicate TSO headers
 * @page: backing page (NOTE(review): allocated/refcounted elsewhere —
 *	see get_page_hdr() below; confirm ownership there)
 * @pos: current write position inside @page (next free byte)
 */
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};
14
15static inline dma_addr_t
16iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
17{
18 return txq->first_tb_dma +
19 sizeof(struct iwl_pcie_first_tb_buf) * idx;
20}
21
22static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
23{
24 return index & (q->n_window - 1);
25}
26
/* Unmap DMA resources of gen2 TX queue @txq_id (see .c implementation). */
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
28
29static inline void iwl_wake_queue(struct iwl_trans *trans,
30 struct iwl_txq *txq)
31{
32 if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
33 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
34 iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
35 }
36}
37
38static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
39 struct iwl_txq *txq, int idx)
40{
41 if (trans->trans_cfg->use_tfh)
42 idx = iwl_txq_get_cmd_index(txq, idx);
43
44 return txq->tfds + trans->txqs.tfd.size * idx;
45}
46
/*
 * Allocate resources for @txq with @slots_num entries; @cmd_queue selects
 * command-queue handling. Returns 0 or a negative errno (NOTE(review):
 * inferred from kernel convention — confirm in the .c implementation).
 */
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
49
50
51
52
53
54static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
55{
56 return upper_32_bits(phys) != upper_32_bits(phys + len);
57}
58
/* Number of free slots in @q (see .c implementation for exact semantics). */
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
60
61static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
62{
63 if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
64 iwl_op_mode_queue_full(trans->op_mode, txq->id);
65 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
66 } else {
67 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
68 txq->id);
69 }
70}
71
72
73
74
75
76static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
77{
78 return ++index &
79 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
80}
81
82
83
84
85
86static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
87{
88 return --index &
89 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
90}
91
92static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
93{
94 int index = iwl_txq_get_cmd_index(q, i);
95 int r = iwl_txq_get_cmd_index(q, q->read_ptr);
96 int w = iwl_txq_get_cmd_index(q, q->write_ptr);
97
98 return w >= r ?
99 (index >= r && index < w) :
100 !(index < r && index >= w);
101}
102
/* Release the TSO header page associated with @skb, if any. */
void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

/* Dump scheduler (SCD) state for @txq on error. */
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

/* Append a TB of (@addr, @len) to a gen2 TFD; returns the TB index
 * or a negative error (NOTE(review): inferred — confirm in the .c file). */
int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

/* Unmap all TBs of a gen2 TFD described by @meta. */
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

/* Dynamically allocate a TX queue for (@sta_id, @tid); returns the queue
 * id or a negative error (NOTE(review): inferred — confirm in the .c file). */
int iwl_txq_dyn_alloc(struct iwl_trans *trans,
		      __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size,
		      unsigned int timeout);

/* Gen2 TX entry point: map @skb + @dev_cmd onto queue @txq_id. */
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

/* Free a dynamically allocated TX queue. */
void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
/* Free the TFD at the current read pointer of a gen2 queue. */
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
/* Update the hardware write pointer for @txq. */
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
/* Free all gen2 TX resources of @trans. */
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
/* Initialize an already-allocated @txq (pairs with iwl_txq_alloc()). */
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
/* One-time gen2 TX setup, creating queue @txq_id of @queue_size entries. */
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
/* Get a TSO header page with at least @len bytes free, tied to @skb. */
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif
134static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
135 void *_tfd)
136{
137 struct iwl_tfd *tfd;
138
139 if (trans->trans_cfg->use_tfh) {
140 struct iwl_tfh_tfd *tfd = _tfd;
141
142 return le16_to_cpu(tfd->num_tbs) & 0x1f;
143 }
144
145 tfd = (struct iwl_tfd *)_tfd;
146 return tfd->num_tbs & 0x1f;
147}
148
149static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
150 void *_tfd, u8 idx)
151{
152 struct iwl_tfd *tfd;
153 struct iwl_tfd_tb *tb;
154
155 if (trans->trans_cfg->use_tfh) {
156 struct iwl_tfh_tfd *tfd = _tfd;
157 struct iwl_tfh_tb *tb = &tfd->tbs[idx];
158
159 return le16_to_cpu(tb->tb_len);
160 }
161
162 tfd = (struct iwl_tfd *)_tfd;
163 tb = &tfd->tbs[idx];
164
165 return le16_to_cpu(tb->hi_n_len) >> 4;
166}
167
/* Unmap the gen1 TFD at @index of @txq, described by @meta. */
void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
/* Invalidate the byte-count table entry for @txq (gen1). */
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
/* Record @byte_cnt/@num_tbs in the gen1 byte-count table for @txq. */
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
/* Reclaim completed frames up to @ssn on @txq_id, collecting them in @skbs. */
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs);
/* Set the software queue pointers of @txq_id to @ptr. */
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
/* Freeze/unfreeze the stuck-queue timer for the queues in @txqs bitmap. */
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze);
/* Refresh @txq's stuck-queue timer state (NOTE(review): inferred — confirm). */
void iwl_txq_progress(struct iwl_txq *txq);
/* Free the TFD at the current read pointer of a gen1 queue. */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
/* Send host command @cmd, synchronously or not per its flags. */
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
184#endif
185