1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "hif.h"
19#include "ce.h"
20#include "debug.h"
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61static inline unsigned int
62ath10k_set_ring_byte(unsigned int offset,
63 struct ath10k_hw_ce_regs_addr_map *addr_map)
64{
65 return ((offset << addr_map->lsb) & addr_map->mask);
66}
67
68static inline unsigned int
69ath10k_get_ring_byte(unsigned int offset,
70 struct ath10k_hw_ce_regs_addr_map *addr_map)
71{
72 return ((offset & addr_map->mask) >> (addr_map->lsb));
73}
74
/* Read a 32-bit CE register through the bus-specific backend ops. */
static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}
81
/* Write a 32-bit CE register through the bus-specific backend ops. */
static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}
88
/* Publish destination-ring write index @n to hardware for the CE at
 * @ce_ctrl_addr.
 */
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dst_wr_index_addr, n);
}
96
/* Read back the destination-ring write index currently held by hardware. */
static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->dst_wr_index_addr);
}
103
/* Publish source-ring write index @n to hardware for the CE at
 * @ce_ctrl_addr.
 */
static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_wr_index_addr, n);
}
111
/* Read back the source-ring write index currently held by hardware. */
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->sr_wr_index_addr);
}
118
/* Read the hardware's current source-ring read index (how far the CE has
 * consumed send descriptors).
 */
static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->current_srri_addr);
}
125
/* Program the CE-visible (device address space) base of the source ring. */
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr, addr);
}
133
/* Program the source ring size (number of descriptor entries). */
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_size_addr, n);
}
141
142static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
143 u32 ce_ctrl_addr,
144 unsigned int n)
145{
146 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
147
148 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
149 ctrl_regs->addr);
150
151 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
152 (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
153 ath10k_set_ring_byte(n, ctrl_regs->dmax));
154}
155
156static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
157 u32 ce_ctrl_addr,
158 unsigned int n)
159{
160 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
161
162 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
163 ctrl_regs->addr);
164
165 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
166 (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
167 ath10k_set_ring_byte(n, ctrl_regs->src_ring));
168}
169
170static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
171 u32 ce_ctrl_addr,
172 unsigned int n)
173{
174 struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
175
176 u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
177 ctrl_regs->addr);
178
179 ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
180 (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
181 ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
182}
183
/* Read the hardware's current destination-ring read index (how far the CE
 * has filled receive descriptors).
 */
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->current_drri_addr);
}
190
/* Program the CE-visible (device address space) base of the destination
 * ring.
 */
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr, addr);
}
198
/* Program the destination ring size (number of descriptor entries). */
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_size_addr, n);
}
206
207static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
208 u32 ce_ctrl_addr,
209 unsigned int n)
210{
211 struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
212 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
213
214 ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
215 (addr & ~(srcr_wm->wm_high->mask)) |
216 (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
217}
218
219static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
220 u32 ce_ctrl_addr,
221 unsigned int n)
222{
223 struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
224 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
225
226 ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
227 (addr & ~(srcr_wm->wm_low->mask)) |
228 (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
229}
230
231static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
232 u32 ce_ctrl_addr,
233 unsigned int n)
234{
235 struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
236 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
237
238 ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
239 (addr & ~(dstr_wm->wm_high->mask)) |
240 (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
241}
242
243static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
244 u32 ce_ctrl_addr,
245 unsigned int n)
246{
247 struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
248 u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
249
250 ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
251 (addr & ~(dstr_wm->wm_low->mask)) |
252 (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
253}
254
/* Enable the copy-complete interrupt for this CE by setting its bit in the
 * host interrupt-enable register.
 * NOTE(review): "inter" (vs "intr" in the disable counterpart) looks like a
 * historical typo; name kept unchanged for callers.
 */
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr | host_ie->copy_complete->mask);
}
266
/* Disable the copy-complete interrupt for this CE by clearing its bit in
 * the host interrupt-enable register.
 */
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(host_ie->copy_complete->mask));
}
278
/* Disable all watermark interrupts for this CE by clearing the watermark
 * mask bits in the host interrupt-enable register.
 */
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(wm_regs->wm_mask));
}
290
/* Enable error interrupts for this CE via the misc interrupt-enable
 * register.
 */
static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr | misc_regs->err_mask);
}
303
/* Disable error interrupts for this CE via the misc interrupt-enable
 * register.
 */
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar,
			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr & ~(misc_regs->err_mask));
}
316
/* Acknowledge (clear) the interrupt status bits in @mask for this CE by
 * writing them to the watermark status register (write-1-to-clear style —
 * TODO confirm against the register spec).
 */
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
325
326
327
328
329
330static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
331 void *per_transfer_context,
332 dma_addr_t buffer,
333 unsigned int nbytes,
334 unsigned int transfer_id,
335 unsigned int flags)
336{
337 struct ath10k *ar = ce_state->ar;
338 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
339 struct ce_desc *desc, sdesc;
340 unsigned int nentries_mask = src_ring->nentries_mask;
341 unsigned int sw_index = src_ring->sw_index;
342 unsigned int write_index = src_ring->write_index;
343 u32 ctrl_addr = ce_state->ctrl_addr;
344 u32 desc_flags = 0;
345 int ret = 0;
346
347 if (nbytes > ce_state->src_sz_max)
348 ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
349 __func__, nbytes, ce_state->src_sz_max);
350
351 if (unlikely(CE_RING_DELTA(nentries_mask,
352 write_index, sw_index - 1) <= 0)) {
353 ret = -ENOSR;
354 goto exit;
355 }
356
357 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
358 write_index);
359
360 desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
361
362 if (flags & CE_SEND_FLAG_GATHER)
363 desc_flags |= CE_DESC_FLAGS_GATHER;
364 if (flags & CE_SEND_FLAG_BYTE_SWAP)
365 desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
366
367 sdesc.addr = __cpu_to_le32(buffer);
368 sdesc.nbytes = __cpu_to_le16(nbytes);
369 sdesc.flags = __cpu_to_le16(desc_flags);
370
371 *desc = sdesc;
372
373 src_ring->per_transfer_context[write_index] = per_transfer_context;
374
375
376 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
377
378
379 if (!(flags & CE_SEND_FLAG_GATHER))
380 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
381
382 src_ring->write_index = write_index;
383exit:
384 return ret;
385}
386
/*
 * Enqueue one send descriptor (64-bit / WCN3990 descriptor format).
 *
 * Caller must hold ce->ce_lock.  Returns 0 on success, -ENOSR when the
 * source ring has no free slot, -ESHUTDOWN while a crash flush is in
 * progress.
 */
static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
				     void *per_transfer_context,
				     dma_addr_t buffer,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc_64 *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	__le32 *addr;
	u32 desc_flags = 0;
	int ret = 0;

	/* Refuse to touch the hardware while a crash dump/flush runs. */
	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
				      write_index);

	/* transfer_id is carried in the descriptor metadata and handed
	 * back on send completion.
	 */
	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;

	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	/* The 64-bit address field is written as two 32-bit halves; the
	 * upper address bits ride in the second word together with flags.
	 */
	addr = (__le32 *)&sdesc.addr;

	/* NOTE(review): this ORs address high bits into the caller's
	 * @flags, which is re-tested below for CE_SEND_FLAG_GATHER.
	 * Assumes CE_DESC_FLAGS_GET_MASK and CE_SEND_FLAG_GATHER don't
	 * overlap — TODO confirm against ce.h bit definitions.
	 */
	flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
	addr[0] = __cpu_to_le32(buffer);
	addr[1] = __cpu_to_le32(flags);
	if (flags & CE_SEND_FLAG_GATHER)
		addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
	else
		addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));

	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Advance the software copy of the write index. */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* Gather segments don't publish the HW write index; only the final
	 * descriptor of the batch does.
	 */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}
456
/* Dispatch a send to the descriptor-format-specific implementation
 * (32-bit or 64-bit variant).  Caller must hold ce->ce_lock.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  dma_addr_t buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
					     buffer, nbytes, transfer_id, flags);
}
467
/*
 * Undo the most recent ring slot reservation made by a send, before it was
 * published to hardware (error-path helper).  Caller must hold ce->ce_lock.
 */
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	/* Nothing to revert if the ring holds no pending entries. */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	/* If the software write index already matches the hardware one,
	 * the last descriptor was published and can't be taken back.
	 */
	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	/* Step the software write index back one slot (with wraparound)
	 * and drop the associated context pointer.
	 */
	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
494
/* Locked wrapper around ath10k_ce_send_nolock(); takes ce->ce_lock for the
 * duration of the enqueue.
 */
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
513
/* Return the number of free slots in the pipe's source ring (takes
 * ce->ce_lock for a consistent index snapshot).
 */
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int delta;

	spin_lock_bh(&ce->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ce->ce_lock);

	return delta;
}
528
/* Return the number of free destination-ring slots available for posting
 * RX buffers.  Caller must hold ce->ce_lock.
 */
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ce->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
542
/*
 * Post one RX buffer (32-bit descriptor format) to the destination ring
 * and publish the new write index to hardware.
 * Caller must hold ce->ce_lock.  Returns -ENOSPC when the ring is full.
 */
static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
				   dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	/* Pipe 5 is exempt from the ring-full check — presumably it is
	 * allowed to overwrite in-flight entries; TODO confirm against the
	 * pipe-5 handling in _ath10k_ce_completed_recv_next_nolock().
	 */
	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	/* nbytes == 0 marks the descriptor as not-yet-completed by HW. */
	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}
572
/*
 * Post one RX buffer (64-bit descriptor format) to the destination ring
 * and publish the new write index to hardware.
 * Caller must hold ce->ce_lock.  Returns -ENOSPC when the ring is full.
 */
static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
				      void *ctx,
				      dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	/* The target only addresses 37 bits; truncate the DMA address. */
	desc->addr = __cpu_to_le64(paddr);
	desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);

	/* nbytes == 0 marks the descriptor as not-yet-completed by HW. */
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}
605
/*
 * Batch-advance the destination-ring write index by @nentries and publish
 * it to hardware.
 */
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;
	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);

	/* If advancing by the full amount would land the write index on
	 * sw_index, hold one entry back — presumably to keep a full ring
	 * distinguishable from an empty one; TODO confirm.
	 */
	if ((cur_write_idx + nentries) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}
625
/* Locked wrapper: post one RX buffer via the descriptor-format-specific
 * implementation, holding ce->ce_lock.
 */
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
			  dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
639
640
641
642
643
/*
 * Reap the next completed receive descriptor (32-bit format).
 * Returns 0 and fills *nbytesp (and optionally *per_transfer_contextp), or
 * -EIO when the descriptor at sw_index has not been completed by hardware
 * yet (nbytes still 0 from posting time).
 */
static int
	 _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					       void **per_transfer_contextp,
					       unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy the descriptor once, then work on the local copy. */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/* Hardware has not filled this entry yet (nbytes stays 0
		 * from when the buffer was posted).
		 */
		return -EIO;
	}

	/* Re-arm the sentinel so the slot reads as incomplete next time. */
	desc->nbytes = 0;

	/* Return transfer data to the caller. */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Pipe 5 keeps its context pointer — presumably its buffers are
	 * reused in place; TODO confirm against __ath10k_ce_rx_post_buf().
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Advance the software read index. */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}
693
/*
 * Reap the next completed receive descriptor (64-bit format).
 * Returns 0 and fills *nbytesp (and optionally *per_transfer_contextp), or
 * -EIO when the descriptor at sw_index has not been completed by hardware
 * yet (nbytes still 0 from posting time).
 */
static int
_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_DEST_RING_TO_DESC_64(base, sw_index);
	struct ce_desc_64 sdesc;
	u16 nbytes;

	/* Copy the descriptor once, then work on the local copy. */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/* Hardware has not filled this entry yet (nbytes stays 0
		 * from when the buffer was posted).
		 */
		return -EIO;
	}

	/* Re-arm the sentinel so the slot reads as incomplete next time. */
	desc->nbytes = 0;

	/* Return transfer data to the caller. */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Pipe 5 keeps its context pointer — presumably its buffers are
	 * reused in place; TODO confirm.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Advance the software read index. */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}
742
/* Dispatch to the descriptor-format-specific recv-completion reaper.
 * Caller must hold ce->ce_lock.
 */
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_ctx,
					 unsigned int *nbytesp)
{
	return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
							    per_transfer_ctx,
							    nbytesp);
}
751
752int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
753 void **per_transfer_contextp,
754 unsigned int *nbytesp)
755{
756 struct ath10k *ar = ce_state->ar;
757 struct ath10k_ce *ce = ath10k_ce_priv(ar);
758 int ret;
759
760 spin_lock_bh(&ce->ce_lock);
761 ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
762 per_transfer_contextp,
763 nbytesp);
764
765 spin_unlock_bh(&ce->ce_lock);
766
767 return ret;
768}
769
/*
 * Reclaim one posted-but-unconsumed RX buffer (32-bit format) during
 * teardown: returns its DMA address in *bufferp and (optionally) its
 * context.  Returns -EIO when no ring exists or the ring is empty.
 */
static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
				       void **per_transfer_contextp,
				       dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return the buffer's DMA address so the caller can unmap
		 * and free it.
		 */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* Sanitize the slot. */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Advance the software read index past the revoked slot. */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
822
/*
 * Reclaim one posted-but-unconsumed RX buffer (64-bit format) during
 * teardown: returns its DMA address in *bufferp and (optionally) its
 * context.  Returns -EIO when no ring exists or the ring is empty.
 */
static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
					  void **per_transfer_contextp,
					  dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
		struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, sw_index);

		/* Return the buffer's DMA address so the caller can unmap
		 * and free it.
		 */
		*bufferp = __le64_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* Sanitize the slot. */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Advance the software read index past the revoked slot. */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
876
/* Dispatch RX-buffer revocation to the descriptor-format-specific
 * implementation (locking is done inside the implementation).
 */
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp)
{
	return ce_state->ops->ce_revoke_recv_next(ce_state,
						  per_transfer_contextp,
						  bufferp);
}
885
886
887
888
889
/*
 * Reap the next completed send descriptor.  Caller must hold ce->ce_lock.
 * Returns 0 on success, -EIO when nothing has completed, -ENODEV when the
 * read-index register reads all-ones (device gone/unreachable).
 */
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc *desc;

	if (src_ring->hw_index == sw_index) {
		/* The cached hardware read index shows no progress; fetch
		 * a fresh value from the register.
		 */
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		/* 0xffffffff from a register read indicates the device is
		 * no longer reachable (e.g. PCI link down).
		 */
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* Sanitize the reaped slot. */
	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   sw_index);
	desc->nbytes = 0;

	/* Advance the software read index. */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}
939
/* Decode a 32-bit source descriptor at @sw_index into host-order buffer
 * address, byte count and transfer id.
 */
static void ath10k_ce_extract_desc_data(struct ath10k *ar,
					struct ath10k_ce_ring *src_ring,
					u32 sw_index,
					dma_addr_t *bufferp,
					u32 *nbytesp,
					u32 *transfer_idp)
{
	struct ce_desc *base = src_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

	/* Return transfer data to the caller. */
	*bufferp = __le32_to_cpu(desc->addr);
	*nbytesp = __le16_to_cpu(desc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(desc->flags),
			   CE_DESC_FLAGS_META_DATA);
}
956
/* Decode a 64-bit source descriptor at @sw_index into host-order buffer
 * address, byte count and transfer id.
 */
static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
					   struct ath10k_ce_ring *src_ring,
					   u32 sw_index,
					   dma_addr_t *bufferp,
					   u32 *nbytesp,
					   u32 *transfer_idp)
{
	struct ce_desc_64 *base = src_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_SRC_RING_TO_DESC_64(base, sw_index);

	/* Return transfer data to the caller. */
	*bufferp = __le64_to_cpu(desc->addr);
	*nbytesp = __le16_to_cpu(desc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(desc->flags),
			   CE_DESC_FLAGS_META_DATA);
}
974
975
/*
 * Cancel the oldest pending (unprocessed) send on teardown: extract its
 * buffer address, length, transfer id and (optionally) context so the
 * caller can clean up.  Returns -EIO when no ring exists or the ring is
 * empty.
 */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		/* Decode via the descriptor-format-specific extractor. */
		ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
						    bufferp, nbytesp,
						    transfer_idp);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* Sanitize the cancelled slot. */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Advance the software read index past the cancelled slot. */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
1028
/* Locked wrapper around ath10k_ce_completed_send_next_nolock(); takes
 * ce->ce_lock for the duration of the reap.
 */
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
1043
1044
1045
1046
1047
1048
1049
/*
 * Service one copy engine: acknowledge its interrupt status, then invoke
 * the registered receive/send completion callbacks.  Callbacks run without
 * ce_lock held; the lock only guards the status-clear register accesses.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ce->ce_lock);

	/* Ack the copy-complete interrupt before running callbacks so a
	 * completion arriving during the callbacks re-raises the IRQ.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  wm_regs->cc_mask);

	spin_unlock_bh(&ce->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ce->ce_lock);

	/* Clear any watermark interrupt status; watermark interrupts are
	 * kept disabled elsewhere (see ath10k_ce_watermark_intr_disable).
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);

	spin_unlock_bh(&ce->ce_lock);
}
1081
1082
1083
1084
1085
1086
1087
1088void ath10k_ce_per_engine_service_any(struct ath10k *ar)
1089{
1090 int ce_id;
1091 u32 intr_summary;
1092
1093 intr_summary = ath10k_ce_interrupt_summary(ar);
1094
1095 for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
1096 if (intr_summary & (1 << ce_id))
1097 intr_summary &= ~(1 << ce_id);
1098 else
1099
1100 continue;
1101
1102 ath10k_ce_per_engine_service(ar, ce_id);
1103 }
1104}
1105
1106
1107
1108
1109
1110
1111
1112
/*
 * Enable or disable the copy-complete interrupt for one CE based on its
 * attribute flags and whether any callbacks are registered; watermark
 * interrupts are always kept disabled.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
1127
/* Disable copy-complete, error and watermark interrupts on every CE.
 * Always returns 0.
 */
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}
1142
/* Re-apply per-engine interrupt configuration to every CE except the last
 * one.  NOTE(review): the CE_COUNT - 1 bound deliberately skips the final
 * copy engine — presumably it is polled (e.g. diagnostic use) rather than
 * interrupt-driven; confirm before changing the bound.
 */
void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ce_id;
	struct ath10k_ce_pipe *ce_state;

	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
		ce_state = &ce->ce_states[ce_id];
		ath10k_ce_per_engine_handler_adjust(ce_state);
	}
}
1157
/*
 * (Re)initialize one CE source ring: zero the descriptor memory, sync the
 * software indices with the hardware's, and program base address, size,
 * dmax, byte-swap and watermark registers.  Always returns 0.
 */
static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	/* Descriptor size depends on the target's addressing width. */
	if (ar->hw_params.target_64bit)
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

	/* Start software indices from the hardware's current positions so
	 * re-init after restart doesn't desynchronize the ring.
	 */
	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}
1198
/*
 * (Re)initialize one CE destination ring: zero the descriptor memory,
 * sync the software indices with the hardware's, and program base address,
 * size, byte-swap and watermark registers.  Always returns 0.
 */
static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	/* Descriptor size depends on the target's addressing width. */
	if (ar->hw_params.target_64bit)
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

	/* Start software indices from the hardware's current positions so
	 * re-init after restart doesn't desynchronize the ring.
	 */
	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}
1236
1237static struct ath10k_ce_ring *
1238ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
1239 const struct ce_attr *attr)
1240{
1241 struct ath10k_ce_ring *src_ring;
1242 u32 nentries = attr->src_nentries;
1243 dma_addr_t base_addr;
1244
1245 nentries = roundup_pow_of_two(nentries);
1246
1247 src_ring = kzalloc(sizeof(*src_ring) +
1248 (nentries *
1249 sizeof(*src_ring->per_transfer_context)),
1250 GFP_KERNEL);
1251 if (src_ring == NULL)
1252 return ERR_PTR(-ENOMEM);
1253
1254 src_ring->nentries = nentries;
1255 src_ring->nentries_mask = nentries - 1;
1256
1257
1258
1259
1260
1261 src_ring->base_addr_owner_space_unaligned =
1262 dma_alloc_coherent(ar->dev,
1263 (nentries * sizeof(struct ce_desc) +
1264 CE_DESC_RING_ALIGN),
1265 &base_addr, GFP_KERNEL);
1266 if (!src_ring->base_addr_owner_space_unaligned) {
1267 kfree(src_ring);
1268 return ERR_PTR(-ENOMEM);
1269 }
1270
1271 src_ring->base_addr_ce_space_unaligned = base_addr;
1272
1273 src_ring->base_addr_owner_space =
1274 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1275 CE_DESC_RING_ALIGN);
1276 src_ring->base_addr_ce_space =
1277 ALIGN(src_ring->base_addr_ce_space_unaligned,
1278 CE_DESC_RING_ALIGN);
1279
1280 return src_ring;
1281}
1282
1283static struct ath10k_ce_ring *
1284ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
1285 const struct ce_attr *attr)
1286{
1287 struct ath10k_ce_ring *src_ring;
1288 u32 nentries = attr->src_nentries;
1289 dma_addr_t base_addr;
1290
1291 nentries = roundup_pow_of_two(nentries);
1292
1293 src_ring = kzalloc(sizeof(*src_ring) +
1294 (nentries *
1295 sizeof(*src_ring->per_transfer_context)),
1296 GFP_KERNEL);
1297 if (!src_ring)
1298 return ERR_PTR(-ENOMEM);
1299
1300 src_ring->nentries = nentries;
1301 src_ring->nentries_mask = nentries - 1;
1302
1303
1304
1305
1306 src_ring->base_addr_owner_space_unaligned =
1307 dma_alloc_coherent(ar->dev,
1308 (nentries * sizeof(struct ce_desc_64) +
1309 CE_DESC_RING_ALIGN),
1310 &base_addr, GFP_KERNEL);
1311 if (!src_ring->base_addr_owner_space_unaligned) {
1312 kfree(src_ring);
1313 return ERR_PTR(-ENOMEM);
1314 }
1315
1316 src_ring->base_addr_ce_space_unaligned = base_addr;
1317
1318 src_ring->base_addr_owner_space =
1319 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1320 CE_DESC_RING_ALIGN);
1321 src_ring->base_addr_ce_space =
1322 ALIGN(src_ring->base_addr_ce_space_unaligned,
1323 CE_DESC_RING_ALIGN);
1324
1325 return src_ring;
1326}
1327
1328static struct ath10k_ce_ring *
1329ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
1330 const struct ce_attr *attr)
1331{
1332 struct ath10k_ce_ring *dest_ring;
1333 u32 nentries;
1334 dma_addr_t base_addr;
1335
1336 nentries = roundup_pow_of_two(attr->dest_nentries);
1337
1338 dest_ring = kzalloc(sizeof(*dest_ring) +
1339 (nentries *
1340 sizeof(*dest_ring->per_transfer_context)),
1341 GFP_KERNEL);
1342 if (dest_ring == NULL)
1343 return ERR_PTR(-ENOMEM);
1344
1345 dest_ring->nentries = nentries;
1346 dest_ring->nentries_mask = nentries - 1;
1347
1348
1349
1350
1351
1352 dest_ring->base_addr_owner_space_unaligned =
1353 dma_zalloc_coherent(ar->dev,
1354 (nentries * sizeof(struct ce_desc) +
1355 CE_DESC_RING_ALIGN),
1356 &base_addr, GFP_KERNEL);
1357 if (!dest_ring->base_addr_owner_space_unaligned) {
1358 kfree(dest_ring);
1359 return ERR_PTR(-ENOMEM);
1360 }
1361
1362 dest_ring->base_addr_ce_space_unaligned = base_addr;
1363
1364 dest_ring->base_addr_owner_space =
1365 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1366 CE_DESC_RING_ALIGN);
1367 dest_ring->base_addr_ce_space =
1368 ALIGN(dest_ring->base_addr_ce_space_unaligned,
1369 CE_DESC_RING_ALIGN);
1370
1371 return dest_ring;
1372}
1373
1374static struct ath10k_ce_ring *
1375ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
1376 const struct ce_attr *attr)
1377{
1378 struct ath10k_ce_ring *dest_ring;
1379 u32 nentries;
1380 dma_addr_t base_addr;
1381
1382 nentries = roundup_pow_of_two(attr->dest_nentries);
1383
1384 dest_ring = kzalloc(sizeof(*dest_ring) +
1385 (nentries *
1386 sizeof(*dest_ring->per_transfer_context)),
1387 GFP_KERNEL);
1388 if (!dest_ring)
1389 return ERR_PTR(-ENOMEM);
1390
1391 dest_ring->nentries = nentries;
1392 dest_ring->nentries_mask = nentries - 1;
1393
1394
1395
1396
1397 dest_ring->base_addr_owner_space_unaligned =
1398 dma_alloc_coherent(ar->dev,
1399 (nentries * sizeof(struct ce_desc_64) +
1400 CE_DESC_RING_ALIGN),
1401 &base_addr, GFP_KERNEL);
1402 if (!dest_ring->base_addr_owner_space_unaligned) {
1403 kfree(dest_ring);
1404 return ERR_PTR(-ENOMEM);
1405 }
1406
1407 dest_ring->base_addr_ce_space_unaligned = base_addr;
1408
1409
1410
1411
1412 memset(dest_ring->base_addr_owner_space_unaligned, 0,
1413 nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
1414
1415 dest_ring->base_addr_owner_space =
1416 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1417 CE_DESC_RING_ALIGN);
1418 dest_ring->base_addr_ce_space =
1419 ALIGN(dest_ring->base_addr_ce_space_unaligned,
1420 CE_DESC_RING_ALIGN);
1421
1422 return dest_ring;
1423}
1424
1425
1426
1427
1428
1429
1430
1431
1432int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
1433 const struct ce_attr *attr)
1434{
1435 int ret;
1436
1437 if (attr->src_nentries) {
1438 ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
1439 if (ret) {
1440 ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
1441 ce_id, ret);
1442 return ret;
1443 }
1444 }
1445
1446 if (attr->dest_nentries) {
1447 ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
1448 if (ret) {
1449 ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
1450 ce_id, ret);
1451 return ret;
1452 }
1453 }
1454
1455 return 0;
1456}
1457
/* Quiesce the source ring of one copy engine by clearing its DMA base
 * address, size, max-transfer size and high watermark registers.
 */
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}
1467
/* Quiesce the destination ring of one copy engine by clearing its DMA
 * base address, size and high watermark registers.
 */
static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}
1476
/* De-program both rings of one copy engine pipe. Registers only; the
 * ring memory is released separately via ath10k_ce_free_pipe().
 */
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}
1482
1483static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1484{
1485 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1486 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1487
1488 if (ce_state->src_ring) {
1489 dma_free_coherent(ar->dev,
1490 (ce_state->src_ring->nentries *
1491 sizeof(struct ce_desc) +
1492 CE_DESC_RING_ALIGN),
1493 ce_state->src_ring->base_addr_owner_space,
1494 ce_state->src_ring->base_addr_ce_space);
1495 kfree(ce_state->src_ring);
1496 }
1497
1498 if (ce_state->dest_ring) {
1499 dma_free_coherent(ar->dev,
1500 (ce_state->dest_ring->nentries *
1501 sizeof(struct ce_desc) +
1502 CE_DESC_RING_ALIGN),
1503 ce_state->dest_ring->base_addr_owner_space,
1504 ce_state->dest_ring->base_addr_ce_space);
1505 kfree(ce_state->dest_ring);
1506 }
1507
1508 ce_state->src_ring = NULL;
1509 ce_state->dest_ring = NULL;
1510}
1511
1512static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
1513{
1514 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1515 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1516
1517 if (ce_state->src_ring) {
1518 dma_free_coherent(ar->dev,
1519 (ce_state->src_ring->nentries *
1520 sizeof(struct ce_desc_64) +
1521 CE_DESC_RING_ALIGN),
1522 ce_state->src_ring->base_addr_owner_space,
1523 ce_state->src_ring->base_addr_ce_space);
1524 kfree(ce_state->src_ring);
1525 }
1526
1527 if (ce_state->dest_ring) {
1528 dma_free_coherent(ar->dev,
1529 (ce_state->dest_ring->nentries *
1530 sizeof(struct ce_desc_64) +
1531 CE_DESC_RING_ALIGN),
1532 ce_state->dest_ring->base_addr_owner_space,
1533 ce_state->dest_ring->base_addr_ce_space);
1534 kfree(ce_state->dest_ring);
1535 }
1536
1537 ce_state->src_ring = NULL;
1538 ce_state->dest_ring = NULL;
1539}
1540
/* Free one copy engine pipe's rings via the per-hw ops vtable so the
 * descriptor size (ce_desc vs ce_desc_64) matches how it was allocated.
 */
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	ce_state->ops->ce_free_pipe(ar, ce_id);
}
1548
1549void ath10k_ce_dump_registers(struct ath10k *ar,
1550 struct ath10k_fw_crash_data *crash_data)
1551{
1552 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1553 struct ath10k_ce_crash_data ce_data;
1554 u32 addr, id;
1555
1556 lockdep_assert_held(&ar->data_lock);
1557
1558 ath10k_err(ar, "Copy Engine register dump:\n");
1559
1560 spin_lock_bh(&ce->ce_lock);
1561 for (id = 0; id < CE_COUNT; id++) {
1562 addr = ath10k_ce_base_address(ar, id);
1563 ce_data.base_addr = cpu_to_le32(addr);
1564
1565 ce_data.src_wr_idx =
1566 cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
1567 ce_data.src_r_idx =
1568 cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
1569 ce_data.dst_wr_idx =
1570 cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
1571 ce_data.dst_r_idx =
1572 cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
1573
1574 if (crash_data)
1575 crash_data->ce_crash_data[id] = ce_data;
1576
1577 ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
1578 le32_to_cpu(ce_data.base_addr),
1579 le32_to_cpu(ce_data.src_wr_idx),
1580 le32_to_cpu(ce_data.src_r_idx),
1581 le32_to_cpu(ce_data.dst_wr_idx),
1582 le32_to_cpu(ce_data.dst_r_idx));
1583 }
1584
1585 spin_unlock_bh(&ce->ce_lock);
1586}
1587
/* Copy engine ops for targets using the 32-bit struct ce_desc layout
 * (the default; see ath10k_ce_set_ops()).
 */
static const struct ath10k_ce_ops ce_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
	.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data,
	.ce_free_pipe = _ath10k_ce_free_pipe,
	.ce_send_nolock = _ath10k_ce_send_nolock,
};
1598
/* Copy engine ops for targets using the 64-bit struct ce_desc_64 layout
 * (selected for WCN3990 in ath10k_ce_set_ops()).
 */
static const struct ath10k_ce_ops ce_64_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
	.ce_completed_recv_next_nolock =
		_ath10k_ce_completed_recv_next_nolock_64,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
	.ce_free_pipe = _ath10k_ce_free_pipe_64,
	.ce_send_nolock = _ath10k_ce_send_nolock_64,
};
1610
1611static void ath10k_ce_set_ops(struct ath10k *ar,
1612 struct ath10k_ce_pipe *ce_state)
1613{
1614 switch (ar->hw_rev) {
1615 case ATH10K_HW_WCN3990:
1616 ce_state->ops = &ce_64_ops;
1617 break;
1618 default:
1619 ce_state->ops = &ce_ops;
1620 break;
1621 }
1622}
1623
1624int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
1625 const struct ce_attr *attr)
1626{
1627 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1628 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1629 int ret;
1630
1631 ath10k_ce_set_ops(ar, ce_state);
1632
1633
1634
1635
1636
1637 BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
1638 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1639 BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
1640 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1641 BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
1642 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1643
1644 ce_state->ar = ar;
1645 ce_state->id = ce_id;
1646 ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1647 ce_state->attr_flags = attr->flags;
1648 ce_state->src_sz_max = attr->src_sz_max;
1649
1650 if (attr->src_nentries)
1651 ce_state->send_cb = attr->send_cb;
1652
1653 if (attr->dest_nentries)
1654 ce_state->recv_cb = attr->recv_cb;
1655
1656 if (attr->src_nentries) {
1657 ce_state->src_ring =
1658 ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
1659 if (IS_ERR(ce_state->src_ring)) {
1660 ret = PTR_ERR(ce_state->src_ring);
1661 ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
1662 ce_id, ret);
1663 ce_state->src_ring = NULL;
1664 return ret;
1665 }
1666 }
1667
1668 if (attr->dest_nentries) {
1669 ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
1670 ce_id,
1671 attr);
1672 if (IS_ERR(ce_state->dest_ring)) {
1673 ret = PTR_ERR(ce_state->dest_ring);
1674 ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
1675 ce_id, ret);
1676 ce_state->dest_ring = NULL;
1677 return ret;
1678 }
1679 }
1680
1681 return 0;
1682}
1683