#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

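/* Encode a queue depth in bytes into the single-byte format used in the
 * host/firmware shared tx queue state: a factor of 128-byte units (kept
 * below 64) scaled by an 8^exp exponent. Depths too large to represent
 * saturate at 0xff.
 */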
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

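/* Update the shared peer/tid tx queue state entry for @txq. This only
 * applies when the firmware operates in push-pull mode; the caller must
 * hold htt.tx_lock and follow up with __ath10k_htt_tx_txq_sync() to make
 * the update visible to the device.
 */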
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}

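/* Bump the sequence number of the shared tx queue state and sync the whole
 * structure to the device so the firmware picks up any recalculated
 * per-peer/tid queue depths.
 */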
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

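/* Pending tx accounting: the data path is paused with
 * ATH10K_TX_PAUSE_Q_FULL once the number of outstanding frames reaches
 * max_num_pending_tx and resumed again as soon as it drops below that
 * limit. All counters are protected by htt->tx_lock.
 */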
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

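/* MSDU ids are allocated from an IDR so that tx completions reported by the
 * firmware can be matched back to their skb. Both helpers require
 * htt->tx_lock to be held by the caller.
 */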
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
}

static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
					      GFP_KERNEL);
	if (!htt->txbuf.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr,
			  htt->frag_desc.paddr);
}

static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_KERNEL);
	if (!htt->frag_desc.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

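/* The peer/tid tx queue state shared with the firmware is a plain kzalloc'd
 * buffer that is streaming-DMA mapped towards the device. It is only set up
 * when the running firmware advertises peer flow control support.
 */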
static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

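/* The txdone fifo queues tx completion status entries for deferred
 * processing. kfifo wants a power-of-two element count, so the fifo is
 * sized to the next power of two of max_num_pending_tx.
 */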
static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	int ret;
	size_t size;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
	ath10k_htt_tx_free_cont_txbuf(htt);

	return ret;
}

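/* Called when the tx path is (re)started: (re)initialise the lock and the
 * pending-tx IDR, and allocate the tx buffers on first use only. Subsequent
 * starts reuse the memory allocated earlier (tracked via tx_mem_allocated).
 */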
int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

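/* idr_for_each() callback used when tearing down the tx path: every still
 * pending msdu is completed with a discard status so that its resources are
 * released through ath10k_txrx_tx_unref().
 */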
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_tx_free_cont_txbuf(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_tx_free_cont_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

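/* Send an HTT version request to the firmware; the version confirmation is
 * processed elsewhere, in the HTT rx path.
 */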
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* Only an 8-bit stats mask is supported here, so the mask is simply
	 * written into the first byte of the upload/reset type bitmaps with
	 * no endianness handling required.
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

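/* Tell the firmware where the host-allocated fragment descriptor bank lives
 * and, if the firmware supports peer flow control, where the shared tx
 * queue state is mapped. Only needed for hardware that uses continuous
 * fragment descriptors.
 */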
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

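/* Configure the rx ring for the LL (low latency) firmware interface: base
 * address, ring length, buffer size, which rx descriptor sections the
 * firmware should deliver, and the offsets of those sections within
 * struct htt_rx_desc.
 */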
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* The rx buffer size must be a multiple of 4 bytes and a multiple of
	 * the maximum cache line size, hence the compile-time checks below.
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Sanity-check the requested limits: 1-64 A-MPDU subframes and
	 * 1-31 A-MSDU subframes.
	 */
	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

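/* Reply to an HTT tx fetch indication: echo the token and fetch sequence
 * number back to the firmware along with the list of fetch records
 * (possibly adjusted by the caller) that the host will transmit.
 */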
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* The response id is echoed back by the firmware but is not used by
	 * the driver for anything yet, so it is simply left at 0.
	 */
	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

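/* Pick the vdev id a frame should be transmitted on: the scan vdev for
 * off-channel frames, the frame's own vif when set, the monitor vdev while
 * monitoring is active, and vdev 0 as a last resort.
 */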
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

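/* Transmit a management frame via the HTT_H2T_MGMT_TX command (used with
 * firmware that expects management frames over HTT rather than WMI). The
 * frame is DMA-mapped and the leading bytes of its 802.11 header are copied
 * into the command itself.
 */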
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

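/* Main HTT data tx path. A per-msdu tx descriptor (HTC header, HTT command
 * header and fragment list) is prepared in the pre-allocated txbuf/frag_desc
 * arrays and handed to the HIF layer as a two-element scatter-gather list:
 * the descriptor first, then up to prefetch_len bytes of the frame itself.
 */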
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* The HTC header is filled in directly here and the frame is pushed
	 * straight to the HIF scatter-gather path below instead of going
	 * through the regular HTC tx path. Tx completion is reported via the
	 * HTT tx completion event, which is why transfer_context is left
	 * NULL for both scatter-gather items.
	 */
	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* The POSTPONED flag keeps the firmware from handing the frame back
	 * to the host for inspection; the frame is completed with a regular
	 * tx completion instead.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}