/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 *
 * A single Copy Engine (CE) comprises two rings:
 *   - a source ring, describing buffers to be sent, and
 *   - a destination ring, supplying receive buffers to be filled.
 *
 * Each ring consists of a number of descriptors which specify an
 * address, a length and meta-data. Typically one side of the PCIe
 * interconnect (Host or Target) controls one ring and the other side
 * controls the other ring. The source side chooses when to initiate a
 * transfer and what to send (buffer address and length); the
 * destination side keeps a supply of anonymous receive buffers and
 * unloads completed transfers into them.
 *
 * The host tracks each ring with software copies of the indices
 * (sw_index/write_index) and synchronizes with the hardware through
 * the per-CE index registers accessed by the helpers below.
 */
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}
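
/*
 * For intuition, a worked example of the ring arithmetic used throughout
 * this file (illustrative only; the values below are made up). With an
 * 8-entry ring, nentries_mask is 7 and all indices wrap modulo 8:
 *
 *	write_index = 6, sw_index = 2
 *	entries in flight = (write_index - sw_index) & mask     = 4
 *	free entries      = (sw_index - write_index - 1) & mask = 3
 *
 * One slot is always left unused so that a full ring and an empty ring
 * can be told apart (write_index == sw_index means empty).
 */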

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}
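
/*
 * The CTRL1 and watermark helpers all follow the same read-modify-write
 * pattern: read the register, clear the target field with its mask, then
 * OR in the shifted new value. A minimal sketch of what a *_SET macro is
 * assumed to expand to (the field layout here is hypothetical; the real
 * masks and shifts live in ce.h):
 *
 *	#define FIELD_MASK 0x0000ffff
 *	#define FIELD_LSB  0
 *	#define FIELD_SET(x) (((x) << FIELD_LSB) & FIELD_MASK)
 *
 *	reg = (reg & ~FIELD_MASK) | FIELD_SET(n);
 */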

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}
static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							 u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr & ~CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than max allowed (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc.addr = __cpu_to_le32(buffer);
	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update source ring write index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND: only publish the write index to hardware once the
	 * final fragment of a gather transfer has been queued.
	 */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before the write index has been
	 * published to hardware) that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
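
/*
 * A hypothetical caller sketch (not from this driver): queue a DMA-mapped
 * buffer on a pipe's source ring. Error paths and skb handling are
 * elided; CE_SEND_FLAG_GATHER would be passed for all but the final
 * fragment of a scatter-gather transfer.
 *
 *	dma_addr_t paddr = dma_map_single(ar->dev, skb->data, skb->len,
 *					  DMA_TO_DEVICE);
 *	if (dma_mapping_error(ar->dev, paddr))
 *		return -EIO;
 *
 *	ret = ath10k_ce_send(ce_pipe, skb, paddr, skb->len,
 *			     transfer_id, 0);
 */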

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int delta;

	spin_lock_bh(&ar_pci->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ar_pci->ce_lock);

	return delta;
}

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ar_pci->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);

	/* CE5 (HTT Rx) reuses its receive buffers in place, so the
	 * ring-full check does not apply to it.
	 */
	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}
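
/*
 * A hypothetical usage sketch (not from this driver): map a fresh
 * receive skb and hand it to the copy engine, holding ce_lock as the
 * lockdep assertion above requires.
 *
 *	dma_addr_t paddr = dma_map_single(ar->dev, skb->data, buf_len,
 *					  DMA_FROM_DEVICE);
 *	if (dma_mapping_error(ar->dev, paddr))
 *		return -EIO;
 *
 *	spin_lock_bh(&ar_pci->ce_lock);
 *	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
 *	spin_unlock_bh(&ar_pci->ce_lock);
 */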

void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}

int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/* This closes a relatively unusual race where the Host
		 * sees the updated destination read index before the
		 * update to the corresponding descriptor has completed.
		 * Treat the descriptor as not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from the completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context,
	 * so clear the transfer context for all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   nbytesp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
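
/*
 * A typical consumer drains completions in a loop until -EIO signals an
 * empty ring; a hedged sketch (process_rx_buffer is hypothetical, the CE
 * call is real):
 *
 *	void *ctx;
 *	unsigned int nbytes;
 *
 *	while (ath10k_ce_completed_recv_next(ce_state, &ctx,
 *					     &nbytes) == 0)
 *		process_rx_buffer(ctx, nbytes);
 */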

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from the destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * copy of the HW completion index. Re-read the HW index
		 * to see whether the SW has really caught up, or whether
		 * the cached value was merely stale.
		 */
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from the source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
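
/*
 * Send completions are reaped the same way; a hedged sketch (the
 * unmap/free details are hypothetical and depend on what the caller
 * stored as the transfer context):
 *
 *	struct sk_buff *skb;
 *
 *	while (ath10k_ce_completed_send_next(ce_state,
 *					     (void **)&skb) == 0) {
 *		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
 *				 skb->len, DMA_TO_DEVICE);
 *		dev_kfree_skb_any(skb);
 *	}
 */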

/*
 * Guts of the interrupt handler for per-engine interrupts on a
 * particular CE.
 *
 * Invoked by ath10k_ce_per_engine_service_any whenever the interrupt
 * summary indicates this engine has work pending.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but they still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}
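
/*
 * Example of the summary walk above (illustrative value): if
 * CE_INTERRUPT_SUMMARY() returns 0x05, bits 0 and 2 are set, so only
 * engines 0 and 2 are serviced; after bit 2 is cleared, intr_summary
 * reaches zero and the loop exits early.
 */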

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if (!disable_copy_compl_intr &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id;

	/* Skip the last copy engine, used for the diagnostic window,
	 * as it is polled and is not initialized for interrupts.
	 */
	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
		ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	memset(dest_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported.
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return src_ring;
}
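
/*
 * Worked alignment example (illustrative; the real value of
 * CE_DESC_RING_ALIGN lives in ce.h): if the alignment were 64 and
 * dma_alloc_coherent() returned a DMA address ending in 0x...1010,
 * ALIGN() would round it up to 0x...1040, and PTR_ALIGN() applies the
 * same rounding to the CPU pointer. The extra CE_DESC_RING_ALIGN bytes
 * in the allocation guarantee the rounded-up array still fits, and both
 * views of the mapping refer to the same aligned descriptor array.
 */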

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported.
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Initialize the memory to 0 so stale data cannot be mistaken
	 * for completed transfers during firmware download.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return dest_ring;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings, or it may be called twice for separate source and destination
 * initialization; it may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	int ret;

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	return 0;
}

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	/*
	 * Make sure there are enough CE ringbuffer entries for HTT TX to
	 * avoid additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */
	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	if (attr->src_nentries)
		ce_state->send_cb = attr->send_cb;

	if (attr->dest_nentries)
		ce_state->recv_cb = attr->recv_cb;

	if (attr->src_nentries) {
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}
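
/*
 * Expected pipe lifecycle, as a hedged sketch (error handling elided;
 * "attr" would come from the host CE configuration tables in pci.c):
 *
 *	ret = ath10k_ce_alloc_pipe(ar, ce_id, attr);	<-- allocate DMA rings
 *	ret = ath10k_ce_init_pipe(ar, ce_id, attr);	<-- program ring registers
 *	...send/receive traffic...
 *	ath10k_ce_deinit_pipe(ar, ce_id);		<-- quiesce the hardware
 *	ath10k_ce_free_pipe(ar, ce_id);			<-- release DMA memory
 */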

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];

	if (ce_state->src_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}