#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"
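
/*
 * Support for the Copy Engine (CE) hardware, which is mainly used for
 * communication between host and target over the PCIe interconnect.
 * Each CE comprises a source ring (host-to-target transfers) and/or a
 * destination ring (target-to-host transfers) of DMA descriptors,
 * together with per-transfer context pointers tracked in software.
 */
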
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
						       u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr & ~CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}
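
/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */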
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: send exceeds max transfer size (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags = __cpu_to_le16(desc_flags);

	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;
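
	/* Update source ring write index */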
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
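
	/*
	 * For a gather transfer the hardware write index update is
	 * deferred until the final fragment has been queued.
	 */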
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	ath10k_pci_sleep(ar);
	return ret;
}

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);
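
	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before the write index has been
	 * updated) that needs to be cleaned up.
	 */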
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int delta;

	spin_lock_bh(&ar_pci->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ar_pci->ce_lock);

	return delta;
}

int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
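
		/* Update destination descriptor */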
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;
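
		/* Update destination ring write index */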
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
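
/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */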
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;
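
	/* Copy the descriptor in one go for performance reasons */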
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
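		/*
		 * This closes a relatively unusual race where the host
		 * sees the updated destination read index before the
		 * update to the corresponding descriptor has completed.
		 * We treat this as a descriptor that is not yet done.
		 */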
		return -EIO;
	}

	desc->nbytes = 0;
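
	/* Return data from completed destination descriptor */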
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		dest_ring->per_transfer_context[sw_index] = NULL;

		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
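
/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */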
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret;

	if (src_ring->hw_index == sw_index) {
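		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index. Re-read the HW
		 * index to see whether the SW has really caught up, or
		 * if the cached value has become stale.
		 */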
		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if ((read_index == sw_index) || (read_index == 0xffffffff))
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
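
	/* Return data from completed source descriptor */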
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	src_ring->per_transfer_context[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}
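
/* NB: Modeled after ath10k_ce_completed_send_next */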
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
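
		/* Return data from completed source descriptor */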
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = NULL;

		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
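
/*
 * Guts of the interrupt handler for per-engine interrupts on a
 * particular CE.
 *
 * Invokes the registered callbacks for recv_complete and send_complete,
 * and clears the relevant interrupt status bits.
 */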
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	spin_lock_bh(&ar_pci->ce_lock);
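
	/* Clear the copy-complete interrupts that will be handled here */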
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);
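
	/*
	 * Watermark interrupts are not handled by this driver, but they
	 * still need to be cleared.
	 */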
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
	ath10k_pci_sleep(ar);
}
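
/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */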
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id, ret;
	u32 intr_summary;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
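			/* no intr pending on this CE */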
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}
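
/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */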
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	ath10k_pci_sleep(ar);

	return 0;
}

void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*send_cb)(struct ath10k_ce_pipe *),
				int disable_interrupts)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->send_cb = send_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
	spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*recv_cb)(struct ath10k_ce_pipe *))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = recv_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->per_transfer_context, 0,
	       nentries * sizeof(*src_ring->per_transfer_context));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	memset(dest_ring->per_transfer_context, 0,
	       nentries * sizeof(*dest_ring->per_transfer_context));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;
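
	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported.
	 */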
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);
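
	/*
	 * Also allocate a shadow src ring in regular
	 * memory to use for faster access.
	 */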
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		dma_free_coherent(ar->dev,
				  (nentries * sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  src_ring->base_addr_owner_space,
				  src_ring->base_addr_ce_space);
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;
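
	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported.
	 */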
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;
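
	/*
	 * Zero the descriptor ring so stale entries are not mistaken
	 * for completed transfers once the target starts using it.
	 */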
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return dest_ring;
}
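
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */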
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;
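
	/*
	 * Make sure there are enough CE ringbuffer entries for HTT TX to
	 * avoid additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */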
	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;
	spin_unlock_bh(&ar_pci->ce_lock);

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			goto out;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			goto out;
		}
	}

out:
	ath10k_pci_sleep(ar);
	return ret;
}

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);

	ath10k_pci_sleep(ar);
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	if (attr->src_nentries) {
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err("failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}