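/*
 * iwm driver Tx path: Tx credit accounting against the UMAC page pools
 * and super-pools, per-queue Tx workers that concatenate frames into
 * single bus transfers, and the netdev Tx entry point.
 */
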
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/log2.h>	/* ilog2() */

#include "iwm.h"
#include "debug.h"
#include "commands.h"
#include "hal.h"
#include "umac.h"
#include "bus.h"
#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff

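/*
 * Number of UMAC pages needed to hold n bytes, i.e.
 * DIV_ROUND_UP(n, IWM_UMAC_PAGE_SIZE), assuming IWM_UMAC_PAGE_SIZE is
 * a power of two.
 */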
#define BYTES_TO_PAGES(n) (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
			   (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))

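/*
 * Tx queue ids and credit pool ids are almost identical: the command
 * queue (IWM_TX_CMD_QUEUE) has its own pool in the middle of the
 * range, so data pools above it are shifted by one.
 */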
#define pool_id_to_queue(id) (((id) < IWM_TX_CMD_QUEUE) ? (id) : (id) - 1)
#define queue_to_pool_id(q) (((q) < IWM_TX_CMD_QUEUE) ? (q) : (q) + 1)

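/*
 * Tx credit (in pages) currently available to pool id: the room left
 * in the pool itself, capped by what its super-pool can still provide.
 */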
static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id)
{
	struct pool_entry *pool = &tx_credit->pools[id];
	struct spool_entry *spool = &tx_credit->spools[pool->sid];
	int spool_pages;

	/* Pages the super-pool can still hand to this pool; the pool's
	 * unused guaranteed minimum does not count against the spool. */
	spool_pages = spool->max_pages - spool->alloc_pages +
		      max(pool->min_pages - pool->alloc_pages, 0);

	return min(pool->max_pages - pool->alloc_pages, spool_pages);
}

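/*
 * Check whether pool id has enough credit for an nb byte transfer.
 * When it does not, flag the pool in full_pools_map so the matching
 * Tx queue stops until iwm_tx_credit_inc() clears the flag.
 */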
static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
{
	u32 npages = BYTES_TO_PAGES(nb);

	if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
		return true;

	set_bit(id, &iwm->tx_credit.full_pools_map);

	IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
		   pool_id_to_queue(id),
		   iwm_tx_credit_get(&iwm->tx_credit, id));

	return false;
}

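/*
 * Called on UMAC Tx responses: the UMAC reports a cumulative count of
 * pages freed for this pool, and the difference is credited back to
 * the pool (and, for pages above the pool's guaranteed minimum, to its
 * super-pool). The cumulative counter wraps at IWM_UMAC_PAGE_ALLOC_WRAP.
 * If the pool was flagged full and drops below half of its capacity,
 * the corresponding Tx worker is kicked again.
 */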
void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages)
{
	struct pool_entry *pool;
	struct spool_entry *spool;
	int freed_pages;
	int queue;

	BUG_ON(id >= IWM_MACS_OUT_GROUPS);

	pool = &iwm->tx_credit.pools[id];
	spool = &iwm->tx_credit.spools[pool->sid];

	freed_pages = total_freed_pages - pool->total_freed_pages;
	IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id);

	if (!freed_pages) {
		IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n");
		return;
	} else if (freed_pages < 0)
		freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1;

	if (pool->alloc_pages > pool->min_pages) {
		int spool_pages = pool->alloc_pages - pool->min_pages;

		spool_pages = min(spool_pages, freed_pages);
		spool->alloc_pages -= spool_pages;
	}

	pool->alloc_pages -= freed_pages;
	pool->total_freed_pages = total_freed_pages;

	IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
		   "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
		   pool->total_freed_pages, pool->sid, spool->alloc_pages);

	if (test_bit(id, &iwm->tx_credit.full_pools_map) &&
	    (pool->alloc_pages < pool->max_pages / 2)) {
		clear_bit(id, &iwm->tx_credit.full_pools_map);

		queue = pool_id_to_queue(id);

		IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available "
			   "credit: %d\n", queue,
			   iwm_tx_credit_get(&iwm->tx_credit, id));
		queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
	}
}

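/*
 * Charge alloc_pages to pool id. Pages within the pool's guaranteed
 * minimum are charged to the pool only; anything above the minimum is
 * also charged to the super-pool. Called with tx_credit.lock held.
 */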
static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages)
{
	struct pool_entry *pool;
	struct spool_entry *spool;
	int spool_pages;

	IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n",
		   alloc_pages, id);

	BUG_ON(id >= IWM_MACS_OUT_GROUPS);

	pool = &iwm->tx_credit.pools[id];
	spool = &iwm->tx_credit.spools[pool->sid];

	spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages;

	if (pool->alloc_pages >= pool->min_pages)
		spool->alloc_pages += alloc_pages;
	else if (spool_pages > 0)
		spool->alloc_pages += spool_pages;

	pool->alloc_pages += alloc_pages;

	IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
		   "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
		   pool->total_freed_pages, pool->sid, spool->alloc_pages);
}

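/*
 * Reserve page credit for an nb byte transfer from pool id.
 * Returns 0 on success and -ENOSPC when the pool is out of credit,
 * in which case the pool is flagged full and the Tx worker stops.
 */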
int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
{
	u32 npages = BYTES_TO_PAGES(nb);
	int ret = 0;

	spin_lock(&iwm->tx_credit.lock);

	if (!iwm_tx_credit_ok(iwm, id, nb)) {
		IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
		ret = -ENOSPC;
		goto out;
	}

	iwm_tx_credit_dec(iwm, id, npages);

 out:
	spin_unlock(&iwm->tx_credit.lock);
	return ret;
}
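/*
 * Parse the page group (pool) and super group (spool) layout reported
 * by the UMAC in its alive notification and reset all allocation
 * counters. Each pool's guaranteed minimum is pre-charged to its
 * super-pool.
 */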
void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
			      struct iwm_umac_notif_alive *alive)
{
	int i, sid, pool_pages;

	spin_lock(&iwm->tx_credit.lock);

	iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count);
	iwm->tx_credit.full_pools_map = 0;
	memset(iwm->tx_credit.spools, 0, sizeof(iwm->tx_credit.spools));

	IWM_DBG_TX(iwm, DBG, "Number of pools: %d\n", iwm->tx_credit.pool_nr);

	for (i = 0; i < iwm->tx_credit.pool_nr; i++) {
		__le32 page_grp_state = alive->page_grp_state[i];

		iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state,
					       UMAC_ALIVE_PAGE_STS_GRP_NUM);
		iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state,
						UMAC_ALIVE_PAGE_STS_SGRP_NUM);
		iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state,
					    UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
		iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state,
					    UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
		iwm->tx_credit.pools[i].alloc_pages = 0;
		iwm->tx_credit.pools[i].total_freed_pages = 0;

		sid = iwm->tx_credit.pools[i].sid;
		pool_pages = iwm->tx_credit.pools[i].min_pages;

		if (iwm->tx_credit.spools[sid].max_pages == 0) {
			iwm->tx_credit.spools[sid].id = sid;
			iwm->tx_credit.spools[sid].max_pages =
				GET_VAL32(page_grp_state,
					  UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);
			iwm->tx_credit.spools[sid].alloc_pages = 0;
		}

		iwm->tx_credit.spools[sid].alloc_pages += pool_pages;

		IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity "
			   "min: %d, max: %d, pool alloc: %d, total_freed: %d, "
			   "super pool alloc: %d\n",
			   i, iwm->tx_credit.pools[i].id,
			   iwm->tx_credit.pools[i].sid,
			   iwm->tx_credit.pools[i].min_pages,
			   iwm->tx_credit.pools[i].max_pages,
			   iwm->tx_credit.pools[i].alloc_pages,
			   iwm->tx_credit.pools[i].total_freed_pages,
			   iwm->tx_credit.spools[sid].alloc_pages);
	}

	spin_unlock(&iwm->tx_credit.lock);
}

#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)

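/*
 * Build one Tx frame at buf: a UDMA + UMAC header pair followed by a
 * copy of the 802.11 payload. The caller must provide at least
 * IWM_UDMA_HDR_LEN + skb->len bytes at buf.
 */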
static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
			       int pool_id, u8 *buf)
{
	struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
	struct iwm_udma_wifi_cmd udma_cmd;
	struct iwm_umac_cmd umac_cmd;
	struct iwm_tx_info *tx_info = skb_to_tx_info(skb);

	udma_cmd.count = cpu_to_le16(skb->len +
				     sizeof(struct iwm_umac_fw_cmd_hdr));

	/* EOP stays clear here; iwm_udma_wifi_hdr_set_eop() sets it on
	 * the last packet of the batch, just before the bus transfer. */
	udma_cmd.eop = 0;
	udma_cmd.credit_group = pool_id;
	udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
	udma_cmd.lmac_offset = 0;

	umac_cmd.id = REPLY_TX;
	umac_cmd.count = cpu_to_le16(skb->len);
	umac_cmd.color = tx_info->color;
	umac_cmd.resp = 0;
	umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm));

	iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd);
	iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd);

	memcpy(buf + sizeof(*hdr), skb->data, skb->len);

	return 0;
}

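/*
 * Flush the concatenation buffer: mark EOP on the last packet and push
 * the whole batch to the device in one bus transfer. No-op when the
 * buffer is empty.
 */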
static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
				      struct iwm_tx_queue *txq)
{
	int ret;

	if (!txq->concat_count)
		return 0;

	IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n",
		   txq->id, txq->concat_count);

	/* mark EOP for the last packet of the batch */
	iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);

	ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);

	txq->concat_count = 0;
	txq->concat_ptr = txq->concat_buf;

	return ret;
}

#define CONFIG_IWM_TX_CONCATENATED 1

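/*
 * Per-queue Tx worker: dequeue frames while the matching pool has
 * credit, pack them back to back into the concatenation buffer (when
 * CONFIG_IWM_TX_CONCATENATED is set) and flush when the buffer fills
 * up or the queue drains. When credit runs out, the frame is put back
 * on the queue and the worker stops until iwm_tx_credit_inc()
 * reschedules it. Finally, wake the netdev subqueue if it was stopped
 * and both credit and queue room are available again.
 */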
void iwm_tx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_tx_info *tx_info = NULL;
	struct sk_buff *skb;
	int cmdlen, ret;
	struct iwm_tx_queue *txq;
	int pool_id;

	txq = container_of(work, struct iwm_tx_queue, worker);
	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

	pool_id = queue_to_pool_id(txq->id);

	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       !skb_queue_empty(&txq->queue)) {

		skb = skb_dequeue(&txq->queue);
		tx_info = skb_to_tx_info(skb);
		cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
			   tx_info->color);

#if !CONFIG_IWM_TX_CONCATENATED
		/* one bus transfer per frame */
		ret = iwm_send_packet(iwm, skb, pool_id);
#else
		/* flush first if this frame would overflow the buffer */
		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
		if (ret) {
			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
				   "%d, Tx worker stopped\n", txq->id);
			skb_queue_head(&txq->queue, skb);
			break;
		}

		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		txq->concat_count += ALIGN(cmdlen, 16);
#endif
		kfree_skb(skb);
	}

	iwm_tx_send_concat_packets(iwm, txq);

	if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
	    !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	    (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
		IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]\n",
			   txq->id);
		netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
	}
}

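/*
 * ndo_start_xmit handler: build the 802.11 header in place, resolve
 * the destination STA, tag the skb with sta/color/tid and hand it to
 * the per-queue Tx worker. Backpressure is applied by stopping the
 * subqueue when the Tx list grows beyond IWM_TX_LIST_SIZE.
 */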
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iwm_priv *iwm = ndev_to_iwm(netdev);
	struct net_device *ndev = iwm_to_ndev(iwm);
	struct wireless_dev *wdev = iwm_to_wdev(iwm);
	u8 *dst_addr;
	struct iwm_tx_info *tx_info;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	u8 sta_id;
	u16 queue;
	int ret;

	if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
			   "not associated\n");
		netif_tx_stop_all_queues(netdev);
		goto drop;
	}

	queue = skb_get_queue_mapping(skb);
	BUG_ON(queue >= IWM_TX_DATA_QUEUES);

	txq = &iwm->txq[queue];

	/* the Tx worker is falling behind: apply backpressure */
	if (skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
		netif_stop_subqueue(netdev, queue);
		return NETDEV_TX_BUSY;
	}

	ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
				       iwm->bssid, 0);
	if (ret) {
		IWM_ERR(iwm, "build wifi header failed\n");
		goto drop;
	}

	dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;

	for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
		sta_info = &iwm->sta_table[sta_id];
		if (sta_info->valid &&
		    !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
			break;
	}

	if (sta_id == IWM_STA_TABLE_NUM) {
		IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
			dst_addr);
		goto drop;
	}

	tx_info = skb_to_tx_info(skb);
	tx_info->sta = sta_id;
	tx_info->color = sta_info->color;

	if (sta_info->qos)
		tx_info->tid = skb->priority;
	else
		tx_info->tid = IWM_UMAC_MGMT_TID;

	/* update stats before the worker can dequeue and free the skb */
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	skb_queue_tail(&iwm->txq[queue].queue, skb);

	queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);

	return NETDEV_TX_OK;

 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}