#include <linux/pm_runtime.h>
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"

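/*
 * iwl_pcie_gen2_tx_stop - stop Tx: mark all queues stopped/unused and unmap them
 */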
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * This can be called before the op_mode has disabled the queues;
	 * since Tx is being stopped altogether, mark every queue as
	 * stopped and unused.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
		if (!trans_pcie->txq[txq_id])
			continue;
		iwl_pcie_gen2_txq_unmap(trans, txq_id);
	}
}
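/*
 * iwl_pcie_gen2_update_byte_tbl - fill this TFD's entry in the Tx byte-count
 * table with the frame length and the number of 64-byte chunks to fetch
 */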
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size is the number of used bytes in the TFD.
	 * Dividing it by 64 gives the number of chunks to fetch to
	 * SRAM - 0 for one chunk, 1 for two chunks and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used and only one 64-byte chunk needs to be
	 * fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
	else
		scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
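/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - send the new write index to the hardware
 */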
static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
					 struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/* the write pointer register also encodes the queue id in its upper half */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
				    struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}
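/*
 * iwl_pcie_gen2_tfd_unmap - unmap all TBs of a TFD (except TB0, which holds
 * the bi-directional DMA data) and reset the TB count to zero
 */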
static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	if (num_tbs > trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never unmapped - it's the bi-directional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* read_ptr is bounded by the queue size and
	 * idx is bounded by n_window
	 */
	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);

	lockdep_assert_held(&txq->lock);

	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				iwl_pcie_get_tfd(trans, txq, idx));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/*
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb.
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
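/*
 * iwl_pcie_gen2_set_tb - append a TB to the TFD; returns the new TB's index,
 * or -EINVAL if the TFD already holds the maximum number of TBs
 */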
static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
				u16 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb = &tfd->tbs[idx];

	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}
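/*
 * iwl_pcie_gen2_build_amsdu - use the TSO core to split an A-MSDU into
 * subframes, build the per-subframe headers in a header page and add a TB
 * for each header chunk and each payload chunk (CONFIG_INET only)
 */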
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	/* if the packet is protected, an IV precedes the payload */
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header room we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* allocate page space for all the subframe headers */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV so the TSO core only sees the
	 * network/transport headers; it is pushed back before returning.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This copies the SNAP header as well, which will be
		 * considered part of the MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       tb_len);

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}

static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
					  struct iwl_txq *txq,
					  struct iwl_device_cmd *dev_cmd,
					  struct sk_buff *skb,
					  struct iwl_cmd_meta *out_meta,
					  int hdr_len,
					  int tx_cmd_len)
{
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - the part that was not copied into the
	 * first TB.
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
				      len + IWL_FIRST_TB_SIZE,
				      hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
				    struct iwl_txq *txq,
				    struct iwl_device_cmd *dev_cmd,
				    struct sk_buff *skb,
				    struct iwl_cmd_meta *out_meta,
				    int hdr_len,
				    int tx_cmd_len)
{
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int i, len, tb1_len, tb2_len;
	void *tb1_addr;

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - the part that was not copied into the
	 * first TB, rounded up to a dword boundary.
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	tb1_len = ALIGN(len, 4);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
	}

	/* set up the remaining entries to point to the skb's frag data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
					      skb_frag_size(frag));

		/* remember which TBs were mapped as pages, for unmapping */
		out_meta->tbs |= BIT(tb_idx);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);

	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    struct iwl_device_cmd *dev_cmd,
					    struct sk_buff *skb,
					    struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (amsdu)
		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						    out_meta, hdr_len, len);

	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				      hdr_len, len);
}
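/*
 * iwl_trans_pcie_gen2_tx - data-path Tx entry point: build the TFD for the
 * skb, fill the byte-count table entry and bump the queue write pointer
 */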
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
	} else {
		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
	}

	if (iwl_queue_space(trans, txq) < txq->high_mark) {
		iwl_stop_queue(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
			struct iwl_device_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout)
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and a Tx status notification will arrive eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}
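/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command on the command queue
 * @trans: transport
 * @cmd: the host command to send
 *
 * Returns a negative value on failure, or the index of the command in the
 * command queue on success.
 */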
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd;

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer, so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
	memset(tfd, 0, sizeof(*tfd));

	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copied bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
			     tb0_size);

	/* map the first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}

	/* Increment and update the write pointer */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
					 pm_runtime_active(&trans_pcie->pci_dev->dev),
					 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise, if the response comes in
		 * later, the handler could use a stale source pointer.
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}
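/*
 * iwl_pcie_gen2_txq_unmap - unmap any remaining DMA mappings and free skb's
 */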
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
					  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs and the first-TB buffers */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
	kfree(txq);
}
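/*
 * iwl_pcie_gen2_txq_free - deallocate a Tx queue
 * @txq_id: queue to deallocate
 *
 * Empty the queue, free the command buffers if this is the command queue,
 * and release the queue's memory.
 */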
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_gen2_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_pcie_gen2_txq_free_memory(trans, txq);

	trans_pcie->txq[txq_id] = NULL;

	clear_bit(txq_id, trans_pcie->queue_used);
}
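/*
 * iwl_trans_pcie_dyn_txq_alloc - allocate a Tx queue dynamically: set up its
 * DMA resources, send the queue-config command to the firmware and activate
 * the queue at the index the firmware returns
 */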
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id, int size,
				 unsigned int timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	struct iwl_txq *txq;
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(*cmd) },
		.data = { cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret, qid;
	u32 wr_ptr;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;
	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
				     (trans->cfg->device_family >=
				      IWL_DEVICE_FAMILY_22560) ?
				     sizeof(struct iwl_gen3_bc_tbl) :
				     sizeof(struct iwlagn_scd_bc_tbl));
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}

	ret = iwl_pcie_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_pcie_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txq[qid] = txq;
	wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (txq->write_ptr) | (qid << 16));
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(&hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(&hcmd);
error:
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}

void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/*
	 * The op_mode may call this after it already called stop_device
	 * (e.g. on rfkill), so only warn about an unused queue if the
	 * device is still enabled.
	 */
	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_pcie_gen2_txq_unmap(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
		if (!trans_pcie->txq[i])
			continue;

		iwl_pcie_gen2_txq_free(trans, i);
	}
}
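/*
 * iwl_pcie_gen2_tx_init - allocate (if needed) and initialize the command
 * queue; data queues are allocated later via iwl_trans_pcie_dyn_txq_alloc
 */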
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *cmd_queue;
	int txq_id = trans_pcie->cmd_queue, ret;

	/* alloc and init the command queue */
	if (!trans_pcie->txq[txq_id]) {
		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
		if (!cmd_queue) {
			IWL_ERR(trans, "Not enough memory for command queue\n");
			return -ENOMEM;
		}
		trans_pcie->txq[txq_id] = cmd_queue;
		ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		cmd_queue = trans_pcie->txq[txq_id];
	}

	ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans_pcie->txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans_pcie->queue_used);

	return 0;

error:
	iwl_pcie_gen2_tx_free(trans);
	return ret;
}