#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/tx.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>

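/*
 * iwl_txq_gen2_tx_stop - mark all Tx queues as stopped/unused and unmap
 * any DMA mappings they still hold.
 */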
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
{
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
		if (!trans->txqs.txq[txq_id])
			continue;
		iwl_txq_gen2_unmap(trans, txq_id);
	}
}

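/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count table
 */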
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 gives the number of chunks to fetch to SRAM -
	 * 0 for one chunk, 1 for two and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes of
	 * the TFD are used, and only one chunk of 64 bytes should be
	 * fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

		/* Starting from AX210, the HW expects bytes */
		WARN_ON(trans->txqs.bc_table_dword);
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
	} else {
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

		/* Before AX210, the HW expects DW */
		WARN_ON(!trans->txqs.bc_table_dword);
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
	}
}

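/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */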
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
				   struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	lockdep_assert_held(&txq->lock);

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/*
		 * Note that we can very well be overwriting a previous skb
		 * pointer here.
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb;

	/*
	 * Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *ret;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	/* set the chaining pointer to the previous page if there */
	*(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
	*page_ptr = ret;

	return ret;
}

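/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */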
static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
				       struct sk_buff *skb,
				       struct iwl_tfh_tfd *tfd,
				       dma_addr_t phys, void *virt,
				       u16 len, struct iwl_cmd_meta *meta)
{
	dma_addr_t oldphys = phys;
	struct page *page;
	int ret;

	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;

	if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
		ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);

		if (ret < 0)
			goto unmap;

		if (meta)
			meta->tbs |= BIT(ret);

		ret = 0;
		goto trace;
	}

	/*
	 * Work around a hardware bug: if the TB crosses a 2^32 boundary
	 * (as checked above), the device may fetch from the wrong address.
	 * To work around it, copy the data to a freshly allocated page and
	 * create a new mapping for it that cannot cross the boundary.
	 */
	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
		ret = -ENOBUFS;
		goto unmap;
	}

	page = get_workaround_page(trans, skb);
	if (!page) {
		ret = -ENOMEM;
		goto unmap;
	}

	memcpy(page_address(page), virt, len);

	phys = dma_map_single(trans->dev, page_address(page), len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;
	ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
	if (ret < 0) {
		/* unmap the new allocation as single */
		oldphys = phys;
		meta = NULL;
		goto unmap;
	}
	IWL_WARN(trans,
		 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
		 len, (unsigned long long)oldphys, (unsigned long long)phys);

	ret = 0;
unmap:
	if (meta)
		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

	return ret;
}

#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb)
{
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

	if (WARN_ON(*page_ptr))
		return NULL;

	if (!p->page)
		goto alloc;

	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 *
	 * (see also get_workaround_page() above)
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	*page_ptr = p->page;
	get_page(p->page);
	return p;
}
#endif

static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
				    struct sk_buff *skb,
				    struct iwl_tfh_tfd *tfd, int start_len,
				    u8 hdr_len,
				    struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		/*
		 * No need for _with_wa, this is from the TSO page and
		 * we leave some space at the end of it so can't hit
		 * the buggy scenario.
		 */
		iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					tb_phys, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			int ret;

			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
							  tb_phys, tso.data,
							  tb_len, NULL);
			if (ret) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}

static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
					 struct iwl_txq *txq,
					 struct iwl_device_tx_cmd *dev_cmd,
					 struct sk_buff *skb,
					 struct iwl_cmd_meta *out_meta,
					 int hdr_len,
					 int tx_cmd_len)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
				     hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd,
				     struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		unsigned int fragsz = skb_frag_size(frag);
		int ret;

		if (!fragsz)
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   fragsz, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb_frag_address(frag),
						  fragsz, out_meta);
		if (ret)
			return ret;
	}

	return 0;
}

static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
				   struct iwl_txq *txq,
				   struct iwl_device_tx_cmd *dev_cmd,
				   struct sk_buff *skb,
				   struct iwl_cmd_meta *out_meta,
				   int hdr_len,
				   int tx_cmd_len,
				   bool pad)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL);
		if (ret)
			goto out_err;
	}

	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static
struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
					   struct iwl_txq *txq,
					   struct iwl_device_tx_cmd *dev_cmd,
					   struct sk_buff *skb,
					   struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or one that was already
	 * built in the higher layers.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						   out_meta, hdr_len, len);
	return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				     hdr_len, len, !amsdu);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the
	 * queue. If q->n_window is smaller than max_tfd_queue_size, there is
	 * no need to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent
	 * to modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

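/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 */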
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	if (txq->bc_tbl.addr)
		dma_pool_free(trans->txqs.bc_pool,
			      txq->bc_tbl.addr, txq->bc_tbl.dma);
	kfree(txq);
}

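/*
 * iwl_txq_gen2_free - Deallocate a TX queue.
 *
 * Empties the queue (unmapping DMA and freeing skb's), frees the command
 * buffers for the command queue, releases the queue's DMA memory and the
 * queue structure itself, and clears its slot in trans->txqs.
 */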
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq;
	int i;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ON(!txq))
		return;

	iwl_txq_gen2_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_txq_gen2_free_memory(trans, txq);

	trans->txqs.txq[txq_id] = NULL;

	clear_bit(txq_id, trans->txqs.queue_used);
}

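/*
 * iwl_queue_init - Initialize queue's high/low-water marks and r/w indexes
 */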
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_txq_get_cmd_index is broken.
	 */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
	 */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_txq_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		next = *(void **)(page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->trans_cfg->use_tfh) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

static void iwl_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_txq_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue)
{
	size_t tfd_sz = trans->txqs.tfd.size *
		trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	if (trans->trans_cfg->use_tfh)
		tfd_sz = trans->txqs.tfd.size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device
	 */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
				 struct iwl_txq **intxq, int size,
				 unsigned int timeout)
{
	size_t bc_tbl_size, bc_tbl_entries;
	struct iwl_txq *txq;
	int ret;

	WARN_ON(!trans->txqs.bc_tbl_size);

	bc_tbl_size = trans->txqs.bc_tbl_size;
	bc_tbl_entries = bc_tbl_size / sizeof(u16);

	if (WARN_ON(size > bc_tbl_entries))
		return -EINVAL;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;

	txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
					  &txq->bc_tbl.dma);
	if (!txq->bc_tbl.addr) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}

	ret = iwl_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	*intxq = txq;
	return 0;

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
				  struct iwl_host_cmd *hcmd)
{
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans->txqs.queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans->txqs.txq[qid] = txq;
	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size, unsigned int timeout)
{
	struct iwl_txq *txq = NULL;
	struct iwl_tx_queue_cfg_cmd cmd = {
		.flags = flags,
		.sta_id = sta_id,
		.tid = tid,
	};
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(cmd) },
		.data = { &cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
	if (ret)
		return ret;

	cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	return iwl_txq_alloc_response(trans, txq, &hcmd);

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
{
	if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
		 "queue %d out of range", queue))
		return;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_txq_gen2_free(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
{
	int i;

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
		if (!trans->txqs.txq[i])
			continue;

		iwl_txq_gen2_free(trans, i);
	}
}

int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
	struct iwl_txq *queue;
	int ret;

	/* alloc and init the tx queue */
	if (!trans->txqs.txq[txq_id]) {
		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
		if (!queue) {
			IWL_ERR(trans, "Not enough memory for tx queue\n");
			return -ENOMEM;
		}
		trans->txqs.txq[txq_id] = queue;
		ret = iwl_txq_alloc(trans, queue, queue_size, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		queue = trans->txqs.txq[txq_id];
	}

	ret = iwl_txq_init(trans, queue, queue_size,
			   (txq_id == trans->txqs.cmd.q_id));
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans->txqs.txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans->txqs.queue_used);

	return 0;

error:
	iwl_txq_gen2_tx_free(trans);
	return ret;
}

static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
						      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;
	dma_addr_t addr;
	dma_addr_t hi_len;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	}

	tfd = _tfd;
	tb = &tfd->tbs[idx];
	addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) <= sizeof(u32))
		return addr;

	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

	/*
	 * shift by 16 twice to avoid warnings on 32-bit
	 * (where this code never runs anyway due to the
	 * if statement above)
	 */
	return addr | ((hi_len << 16) << 16);
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index)
{
	int i, num_tbs;
	void *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(trans,
								    tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(trans,
								      tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

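/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */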
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans->txqs.bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
			bc_ent;
}

void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->txqs.cmd.q_id)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
			bc_ent;
}

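/*
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
 * @trans: transport private data
 * @txq: tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */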
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = iwl_txq_get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/*
		 * Note that we can very well be overwriting a previous skb
		 * pointer here.
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

void iwl_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

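/* Frees buffers until index _not_ inclusive */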
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
	int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
		return;

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans->txqs.queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used.
	 */
	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

	if (!iwl_txq_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->trans_cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_txq_free_tso_page(trans, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->trans_cfg->use_tfh)
			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq);
	}

	iwl_txq_progress(txq);

	if (iwl_txq_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in the reclaim path, which is non
		 * re-entrant, so no one will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx either.
		 * Bottom line, we can unlock and re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans->txqs.dev_cmd_offs);

			/*
			 * Note that we can very well get here even if we
			 * stopped all queues because of a refcount issue in
			 * mac80211. The sta_id might still be valid though.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_txq_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		spin_lock_bh(&txq->lock);
		txq->overflow_tx = false;
	}

out:
	spin_unlock_bh(&txq->lock);
}

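/* Set the read and write pointers of a specific txq to @ptr */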
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);

	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze)
{
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = trans->txqs.txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}