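/* GSI transaction support for the Qualcomm IPA driver */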
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
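/* A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of data transfers in a single structure.
 *
 * A transaction is allocated with gsi_channel_trans_alloc(), which reserves
 * the transfer ring elements (TREs) it will use.  Transfers are then added
 * with gsi_trans_cmd_add(), gsi_trans_page_add(), or gsi_trans_skb_add(),
 * each of which fills in scatterlist entries.  The transaction is committed
 * with gsi_trans_commit() (or one of its waiting variants), which builds one
 * TRE per scatterlist entry in the channel's TRE ring and optionally rings
 * the channel doorbell.  When the hardware signals completion,
 * gsi_trans_complete() unmaps the scatterlist, notifies the IPA layer, and
 * frees the transaction.
 *
 * Transactions, and the scatterlist arrays they use, are allocated from
 * fixed-size pools (struct gsi_trans_pool) so that committing a transfer
 * never requires a memory allocation.
 */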
/* The type of a TRE, as encoded in the TYPE field of its flags */
enum gsi_tre_type {
	GSI_RE_XFER = 0x2,
	GSI_RE_IMMD_CMD = 0x3,
};
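/* A transfer ring element (TRE), in the layout the GSI hardware expects.
 * All fields are little-endian.
 */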
struct gsi_tre {
	__le64 addr;		/* DMA address of the data (or command) */
	__le16 len_opcode;	/* transfer length, or immediate command opcode */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};
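/* gsi_tre->flags mask values (in CPU byte order) */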
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOB_FMASK	GENMASK(8, 8)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)
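/* Initialize a pool that provides @count elements of @size bytes each.
 * A single allocation from the pool never requests more than @max_alloc
 * elements.
 */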
int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif
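	/* By allocating a few extra entries in the pool (one less than the
	 * maximum a single allocation can request), every allocation can be
	 * satisfied from contiguous entries without straddling the end of
	 * the array: if there isn't enough room at the end, we simply start
	 * over at the beginning.
	 */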
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it all */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}

/* Free a pool allocated with gsi_trans_pool_init() */
void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}
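/* Initialize a pool of elements to be used for DMA, backed by a single
 * coherent allocation.  The arguments have the same meaning as for
 * gsi_trans_pool_init(); each element's DMA address can be derived from
 * its offset within the pool.
 */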
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;
	/* The allocator gives us a power-of-2 number of pages, so compute
	 * the usable total from the page order rather than assuming we get
	 * exactly the size requested.
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}
/* Free a DMA pool created by gsi_trans_pool_init_dma().  The size passed
 * to dma_free_coherent() must match what was allocated, which is the
 * element count times the element size.
 */
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}
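/* Allocate @count zeroed, contiguous elements from a pool and return the
 * byte offset of the first one.  Wraps to the start of the pool when there
 * isn't enough room at the end.
 */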
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	/* Start over at the beginning if there isn't room at the end;
	 * the pool is sized so a maximal allocation always fits either way.
	 */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}
/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool, returning its DMA address */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}
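/* Return the pool element that immediately follows the one given, wrapping
 * from the last element back to the first.  Intended for pools whose
 * elements are allocated one at a time.
 */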
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
	void *end = pool->base + pool->count * pool->size;

	/* assert(element >= pool->base); */
	/* assert(element < end); */
	/* assert(pool->max_alloc == 1); */
	element += pool->size;

	return element < end ? element : pool->base;
}
/* Map a TRE ring entry index to the transaction it is associated with */
static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
				  struct gsi_trans *trans)
{
	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	return list_first_entry_or_null(&channel->trans_info.complete,
					struct gsi_trans, links);
}
/* Move a transaction from the allocated list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->pending);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to the completed list */
	list_cut_position(&list, &trans_info->pending, &trans->links);
	list_splice_tail(&list, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction from the completed list to the polled list */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->polled);

	spin_unlock_bh(&trans_info->spinlock);
}
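/* Atomically reserve @tre_count TREs on a channel.  Returns false if the
 * reservation would exceed the number available.
 */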
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}
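/* Allocate a GSI transaction on a channel, reserving @tre_count TREs for
 * it.  Returns NULL if the required TREs can't be reserved.
 */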
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	trans_info = &channel->trans_info;

	/* We reserve the TREs now, but consume them at commit time.
	 * If there aren't enough available, we're done.
	 */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	/* Allocate and initialize non-zero fields in the transaction */
	trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->tre_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist the transaction will use */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;

	spin_lock_bh(&trans_info->spinlock);

	list_add_tail(&trans->links, &trans_info->alloc);

	spin_unlock_bh(&trans_info->spinlock);

	refcount_set(&trans->refcount, 1);

	return trans;
}
/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
	struct gsi_trans_info *trans_info;

	if (!refcount_dec_and_test(&trans->refcount))
		return;

	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_del(&trans->links);

	spin_unlock_bh(&trans_info->spinlock);

	ipa_gsi_trans_release(trans);

	/* Releasing the reserved TREs implicitly frees the scatterlist
	 * entries allocated for the transaction, since both come from
	 * per-channel pools.
	 */
	gsi_trans_tre_release(trans_info, trans->tre_count);
}
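/* Add an immediate command to a transaction.  The command's payload buffer
 * and its DMA address are supplied by the caller.
 */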
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum dma_data_direction direction,
		       enum ipa_cmd_opcode opcode)
{
	struct ipa_cmd_info *info;
	u32 which = trans->used++;
	struct scatterlist *sg;

	/* Command payloads are already mapped for DMA (the DMA address is
	 * supplied by the caller), so the scatterlist entry is filled in
	 * directly rather than being mapped here.  A command transaction
	 * uses direction DMA_NONE, which tells gsi_trans_complete() to
	 * skip the unmapping step.
	 */
	sg = &trans->sgl[which];

	sg_set_buf(sg, buf, size);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = sg->length;

	info = &trans->info[which];
	info->opcode = opcode;
	info->direction = direction;
}
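/* Add a page transfer to a transaction.  The page is mapped for DMA here;
 * it will be unmapped when the transaction completes.
 */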
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used++;	/* Transaction now owns the (DMA mapped) page */

	return 0;
}
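/* Add an SKB transfer to a transaction.  The SKB's data is described by
 * scatterlist entries starting at the transaction's first one, and is
 * mapped for DMA here; it is unmapped when the transaction completes.
 */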
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used;
	int ret;

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used += used;	/* Transaction now owns the (DMA mapped) skb */

	return 0;
}
/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}

static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* Fill the TRE in local memory first, then write it to the ring
	 * element with a single structure assignment.
	 */
	*dest_tre = tre;
}
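/* Commit a transaction to hardware: fill one TRE in the channel's TRE ring
 * for each scatterlist entry in the transaction, wrapping at the end of the
 * ring if necessary.  The last TRE is flagged to interrupt on completion,
 * and the doorbell is rung if requested (or if no more TREs are available).
 */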
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct ipa_cmd_info *info;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u32 avail;
	u32 i;

	/* Consume the entries.  If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If there is no info array we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	info = trans->info ? &trans->info[0] : NULL;
	avail = ring->count - ring->index % ring->count;
	dest_tre = gsi_ring_virt(ring, ring->index);
	for_each_sg(trans->sgl, sg, trans->used, i) {
		bool last_tre = i == trans->used - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(ring, 0);
		if (info)
			opcode = info++->opcode;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}
	ring->index += trans->used;

	if (channel->toward_ipa) {
		/* We record TX bytes when they are sent */
		trans->len = byte_count;
		trans->trans_count = channel->trans_count;
		trans->byte_count = channel->byte_count;
		channel->trans_count++;
		channel->byte_count += byte_count;
	}

	/* Associate the last TRE with the transaction */
	gsi_channel_trans_map(channel, ring->index - 1, trans);

	gsi_trans_move_pending(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_channel_tx_queued(channel);
		gsi_channel_doorbell(channel);
	}
}
/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait up to @timeout milliseconds for it
 * to complete.  Returns 0 if it completed, -ETIMEDOUT otherwise.
 */
int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
				  unsigned long timeout)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
	unsigned long remaining = 1;	/* In case of empty transaction */

	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	remaining = wait_for_completion_timeout(&trans->completion,
						timeout_jiffies);
out_trans_free:
	gsi_trans_free(trans);

	return remaining ? 0 : -ETIMEDOUT;
}
/* Process the completion of a committed transaction */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}
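/* Cancel all of a channel's pending transactions, moving them to the
 * completed list so NAPI polling can clean them up.
 */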
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;
	bool cancelled;

	/* Mark all pending transactions cancelled and move them to
	 * the completed list.
	 */
	spin_lock_bh(&trans_info->spinlock);

	cancelled = !list_empty(&trans_info->pending);
	list_for_each_entry(trans, &trans_info->pending, links)
		trans->cancelled = true;

	list_splice_tail_init(&trans_info->pending, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);

	/* Schedule NAPI polling to complete the cancelled transactions */
	if (cancelled)
		napi_schedule(&channel->napi);
}
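/* Issue a command to read a single byte from a channel.  The TRE is built
 * and the doorbell rung directly, without allocating a full transaction.
 */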
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */
	dest_tre = gsi_ring_virt(ring, ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}
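/* Initialize transaction state for a channel: the TRE-to-transaction map,
 * the transaction and scatterlist pools, and the TRE accounting.
 */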
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	/* The map array is used to determine what transaction is
	 * associated with a TRE that is consumed; an entry is recorded
	 * (modulo the ring count) for the last TRE used by a transaction.
	 */
	trans_info = &channel->trans_info;
	trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map)
		return -ENOMEM;

	/* We can't use more TREs than are available in the ring, which
	 * limits the number of transactions that can be outstanding.
	 * Resources for transactions and the data structures they use
	 * are sized based on this maximum number.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);

	/* Transactions are allocated one at a time */
	ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
				  tre_max, 1);
	if (ret)
		goto err_kfree;

	/* A transaction uses a scatterlist array to represent the data
	 * transfers it implements; each scatterlist element fills one
	 * TRE when the transaction is committed.  So we need as many
	 * scatterlist elements as the maximum number of TREs that can
	 * be outstanding, and a single transaction can use at most as
	 * many as fit in the channel's TLV FIFO (channel->tlv_count).
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->tlv_count);
	if (ret)
		goto err_trans_pool_exit;

	/* The tre_avail count is what limits the number of TREs that
	 * can be outstanding on the channel at any time.
	 */
	atomic_set(&trans_info->tre_avail, tre_max);

	spin_lock_init(&trans_info->spinlock);
	INIT_LIST_HEAD(&trans_info->alloc);
	INIT_LIST_HEAD(&trans_info->pending);
	INIT_LIST_HEAD(&trans_info->complete);
	INIT_LIST_HEAD(&trans_info->polled);

	return 0;

err_trans_pool_exit:
	gsi_trans_pool_exit(&trans_info->pool);
err_kfree:
	kfree(trans_info->map);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}
/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	gsi_trans_pool_exit(&trans_info->pool);
	kfree(trans_info->map);
}