#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
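	/* CE0: host->target HTC control and raw streams */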
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

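	/* CE1: target->host HTT + HTC control */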
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

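	/* CE2: target->host WMI */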
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

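	/* CE3: host->target WMI (mac0) */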
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

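	/* CE4: host->target HTT */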
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

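	/* CE5: target->host pktlog */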
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

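	/* CE6: target autonomous hif_memcpy */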
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

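	/* CE7: host->target WMI (mac1) */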
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

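	/* CE8: target autonomous hif_memcpy */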
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

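	/* CE9: host->target WMI (mac2) */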
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

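	/* CE10: target->host HTT */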
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

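	/* CE11: Not used */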
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath11k_host_ce_config_qca6390[] = {
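	/* CE0: host->target HTC control and raw streams */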
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

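	/* CE1: target->host HTT + HTC control */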
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

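	/* CE2: target->host WMI */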
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

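	/* CE3: host->target WMI (mac0) */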
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

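	/* CE4: host->target HTT */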
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

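	/* CE5: target->host pktlog */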
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

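	/* CE6: target autonomous hif_memcpy */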
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

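	/* CE7: host->target WMI (mac1) */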
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

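	/* CE8: target autonomous hif_memcpy */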
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
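	/* CE0: host->target HTC control and raw streams */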
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

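	/* CE1: target->host HTT + HTC control */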
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

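	/* CE2: target->host WMI */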
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

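	/* CE3: host->target WMI (mac0) */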
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

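	/* CE4: host->target HTT */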
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

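	/* CE5: target->host pktlog */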
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},
};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* only ce4 needs shadow workaround */
	if (ce_id == 4)
		return true;
	return false;
}

void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.ce_count; i++)
		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;

	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
	ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	if (ab->hw_params.supports_shadow_regs &&
	    ath11k_ce_need_shadow_fix(ce_id))
		ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
					    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
					    ce_ring->hal_ring_id);

	return 0;
}

static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (ce_ring == NULL)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}

static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = ath11k_ce_send_done_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

	if (pipe->send_cb)
		pipe->send_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
		pipe->send_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ab->hw_params.host_ce_config[i].src_nentries)
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_SRC, i);

		if (ab->hw_params.host_ce_config[i].dest_nentries) {
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST, i);

			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST_STATUS, i);
		}
	}
}

void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * HAL srng api internally allocates memory i.e. hash table.
	 * Skip memory allocation if already done.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	ath11k_ce_stop_shadow_timers(ab);

	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}

int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
				    &ab->qmi.ce_cfg.shadow_reg_v2_len);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}

void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->src_ring->base_addr_owner_space,
					  pipe->src_ring->base_addr_ce_space);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->dest_ring->base_addr_owner_space,
					  pipe->dest_ring->base_addr_ce_space);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->status_ring->base_addr_owner_space,
					  pipe->status_ring->base_addr_ce_space);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partial successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* For Big Endian Host, Copy Engine byte_swap is enabled
 * When Copy Engine does byte_swap, need to byte swap again for the
 * Host to get/put buffer content in the correct byte order
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
		}
	}
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params.ce_count)
		return -EINVAL;

	return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);