1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#ifndef _QED_CHAIN_H
34#define _QED_CHAIN_H
35
36#include <linux/types.h>
37#include <asm/byteorder.h>
38#include <linux/kernel.h>
39#include <linux/list.h>
40#include <linux/slab.h>
41#include <linux/qed/common_hsi.h>
42
/* Page-linking scheme of a chain; fixed at qed_chain_init_params() time. */
enum qed_chain_mode {
	/* Each page reserves room at its end for a struct qed_chain_next
	 * that points at the following page.
	 */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; no next pointer is required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list (PBL) */
	QED_CHAIN_MODE_PBL,
};
53
/* Intended direction of use; qed_chain_reset() pre-produces the whole
 * capacity for consume-only chains so they start "full".
 */
enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};
59
/* Width of the chain's producer/consumer counters. */
enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};
67
/* Link element stored at the end of every page of a NEXT_PTR chain;
 * holds the next page's physical (regpair) and virtual addresses.
 */
struct qed_chain_next {
	struct regpair next_phys;
	void *next_virt;
};
72
/* 16-bit producer/consumer page indices into the PBL page table. */
struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};
77
/* 32-bit producer/consumer page indices into the PBL page table. */
struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};
82
struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16 prod_idx;
	u16 cons_idx;
};
88
struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32 prod_idx;
	u32 cons_idx;
};
94
/* Generic multi-page ring ("chain") descriptor; element size, page
 * linking mode and counter width are fixed at init time.
 */
struct qed_chain {
	/* Fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */

	/* Point to next element to produce/consume */
	void *p_prod_elem;
	void *p_cons_elem;

	/* Fastpath portions of the PBL [if exists] */
	struct {
		/* Table for keeping the virtual addresses of the chain
		 * pages, respectively to the physical addresses in the
		 * pbl table.
		 */
		void **pp_virt_addr_tbl;

		union {
			struct qed_chain_pbl_u16 u16;
			struct qed_chain_pbl_u32 u32;
		} c;
	} pbl;

	/* Cyclic producer/consumer element indices; which member is live
	 * depends on cnt_type (see is_chain_u16/is_chain_u32).
	 */
	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	/* Usable elements in the entire chain (excludes per-page links) */
	u32 capacity;
	u32 page_cnt;

	enum qed_chain_mode mode;

	/* Elements information for fast calculations */
	u16 elem_per_page;
	u16 elem_per_page_mask;
	u16 elem_size;
	u16 next_page_mask;
	u16 usable_per_page;
	u8 elem_unusable;

	/* enum qed_chain_cnt_type, stored narrow */
	u8 cnt_type;

	/* Slowpath of the chain - required for initialization and
	 * destruction, but isn't involved in regular functionality.
	 */

	/* Base addresses of a pre-allocated buffer for the pbl */
	struct {
		dma_addr_t p_phys_table;
		void *p_virt_table;
	} pbl_sp;

	/* Address of first page of the chain - needed in fastpath only
	 * for the SINGLE mode, otherwise used during init/reset.
	 */
	void *p_virt_addr;
	dma_addr_t p_phys_addr;

	/* Total number of elements [for entire chain], including the
	 * per-page unusable ones.
	 */
	u32 size;

	/* enum qed_chain_use_mode, stored narrow */
	u8 intended_use;
};
159
/* Each PBL entry holds one 64-bit page address */
#define QED_CHAIN_PBL_ENTRY_SIZE (8)
/* Chain pages are 4KB */
#define QED_CHAIN_PAGE_SIZE (0x1000)
/* Number of elements of @elem_size bytes that fit in a single page */
#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))

/* Elements per page that are reserved rather than usable: in NEXT_PTR
 * mode, enough elements to hold a struct qed_chain_next; 0 otherwise.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	 \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?		 \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
		   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) - \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

/* Number of pages needed to hold @elem_cnt usable elements */
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

/* Which counter width (u16/u32) the chain was initialized with */
#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
178
179
180static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
181{
182 return p_chain->u.chain16.prod_idx;
183}
184
185static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
186{
187 return p_chain->u.chain16.cons_idx;
188}
189
190static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
191{
192 return p_chain->u.chain32.cons_idx;
193}
194
195static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
196{
197 u16 used;
198
199 used = (u16) (((u32)0x10000 +
200 (u32)p_chain->u.chain16.prod_idx) -
201 (u32)p_chain->u.chain16.cons_idx);
202 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
203 used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
204 p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
205
206 return (u16)(p_chain->capacity - used);
207}
208
209static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
210{
211 u32 used;
212
213 used = (u32) (((u64)0x100000000ULL +
214 (u64)p_chain->u.chain32.prod_idx) -
215 (u64)p_chain->u.chain32.cons_idx);
216 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
217 used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
218 p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
219
220 return p_chain->capacity - used;
221}
222
223static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
224{
225 return p_chain->usable_per_page;
226}
227
228static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
229{
230 return p_chain->elem_unusable;
231}
232
233static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
234{
235 return p_chain->page_cnt;
236}
237
238static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
239{
240 return p_chain->pbl_sp.p_phys_table;
241}
242
243
244
245
246
247
248
249
250
251
252
/**
 * qed_chain_advance_page(): Step the chain's page-tracking state onto
 * the next page, according to the chain's mode.
 *
 * @p_chain: chain being walked.
 * @p_next_elem: in/out - current element pointer; updated to the first
 *               element of the next page.
 * @idx_to_inc: pointer to the element index (u16 or u32 depending on
 *              cnt_type) that must skip over the page's unusable link
 *              elements in NEXT_PTR mode.
 * @page_to_inc: pointer to the PBL page index (u16 or u32 depending on
 *               cnt_type); used only in PBL mode, wraps at page_cnt.
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* Follow the link element at the end of the current page */
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		/* Single page - wrap back to the page start */
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		/* Increment the page index (wrapping at page_cnt) and look
		 * up the next page's virtual address in the PBL table.
		 */
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}
286
/* True when the chain16/chain32 index named @idx points at the first
 * unusable (link) element of its page.
 */
#define is_unusable_idx(p, idx) \
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* True when one more increment would land @idx on the unusable region */
#define is_unusable_next_idx(p, idx) \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx) \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* If @idx has reached its page's unusable region, skip past it by adding
 * elem_unusable; the u16/u32 flavour is picked from the chain's cnt_type.
 */
#define test_and_skip(p, idx)						   \
	do {								   \
		if (is_chain_u16(p)) {					   \
			if (is_unusable_idx(p, idx))			   \
				(p)->u.chain16.idx += (p)->elem_unusable;  \
		} else {						   \
			if (is_unusable_idx_u32(p, idx))		   \
				(p)->u.chain32.idx += (p)->elem_unusable;  \
		}							   \
	} while (0)
310
311
312
313
314
315
316
317
318
319static inline void qed_chain_return_produced(struct qed_chain *p_chain)
320{
321 if (is_chain_u16(p_chain))
322 p_chain->u.chain16.cons_idx++;
323 else
324 p_chain->u.chain32.cons_idx++;
325 test_and_skip(p_chain, cons_idx);
326}
327
328
329
330
331
332
333
334
335
336
337
338
/**
 * qed_chain_produce(): A chain in which the driver "produces" elements
 * should use this API to get a pointer to the next element that can be
 * written.
 *
 * @p_chain: chain to produce into.
 *
 * When the producer index sits on the page's last usable element
 * (next_page_mask), the chain first advances to the next page - in PBL
 * mode also bumping the producer page index - before handing out the
 * element.
 *
 * Return: virtual address of the element to produce into.
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	/* Hand out the current element and pre-advance the element pointer
	 * by one element for the next call.
	 */
	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}
369
370
371
372
373
374
375
376
377
378
379
380static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
381{
382 return p_chain->capacity;
383}
384
385
386
387
388
389
390
391
392
393static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
394{
395 test_and_skip(p_chain, prod_idx);
396 if (is_chain_u16(p_chain))
397 p_chain->u.chain16.prod_idx++;
398 else
399 p_chain->u.chain32.prod_idx++;
400}
401
402
403
404
405
406
407
408
409
410
411
/**
 * qed_chain_consume(): A chain in which the driver "consumes" elements
 * should use this API to get a pointer to the next element to consume.
 *
 * @p_chain: chain to consume from.
 *
 * Mirror image of qed_chain_produce(): when the consumer index sits on
 * the page's last usable element (next_page_mask), the chain advances
 * to the next page - in PBL mode also bumping the consumer page index -
 * before handing out the element.
 *
 * Return: virtual address of the element to consume.
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	/* Hand out the current element and pre-advance the element pointer
	 * by one element for the next call.
	 */
	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}
442
443
444
445
446
447
/**
 * qed_chain_reset(): Rewind the chain to its start state - zero the
 * producer/consumer indices, point both element pointers back at the
 * first page, rewind the PBL page indices, and for consume-only chains
 * pre-produce the entire capacity.
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons
		 * page's indices, to avoid unnecessary page advancing on
		 * the first call to qed_chain_produce/consume. Instead,
		 * the indices will be advanced to page_cnt and then will
		 * be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements so the chain starts "full" */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;

	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing */
		break;
	}
}
493
494
495
496
497
498
499
500
501
502
503
504
505static inline void qed_chain_init_params(struct qed_chain *p_chain,
506 u32 page_cnt,
507 u8 elem_size,
508 enum qed_chain_use_mode intended_use,
509 enum qed_chain_mode mode,
510 enum qed_chain_cnt_type cnt_type)
511{
512
513 p_chain->p_virt_addr = NULL;
514 p_chain->p_phys_addr = 0;
515 p_chain->elem_size = elem_size;
516 p_chain->intended_use = (u8)intended_use;
517 p_chain->mode = mode;
518 p_chain->cnt_type = (u8)cnt_type;
519
520 p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
521 p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
522 p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
523 p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
524 p_chain->next_page_mask = (p_chain->usable_per_page &
525 p_chain->elem_per_page_mask);
526
527 p_chain->page_cnt = page_cnt;
528 p_chain->capacity = p_chain->usable_per_page * page_cnt;
529 p_chain->size = p_chain->elem_per_page * page_cnt;
530
531 p_chain->pbl_sp.p_phys_table = 0;
532 p_chain->pbl_sp.p_virt_table = NULL;
533 p_chain->pbl.pp_virt_addr_tbl = NULL;
534}
535
536
537
538
539
540
541
542
543
544
545
546static inline void qed_chain_init_mem(struct qed_chain *p_chain,
547 void *p_virt_addr, dma_addr_t p_phys_addr)
548{
549 p_chain->p_virt_addr = p_virt_addr;
550 p_chain->p_phys_addr = p_phys_addr;
551}
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
569 void *p_virt_pbl,
570 dma_addr_t p_phys_pbl,
571 void **pp_virt_addr_tbl)
572{
573 p_chain->pbl_sp.p_phys_table = p_phys_pbl;
574 p_chain->pbl_sp.p_virt_table = p_virt_pbl;
575 p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
576}
577
578
579
580
581
582
583
584
585
586
587
588
589
590static inline void
591qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
592 void *p_virt_curr,
593 void *p_virt_next, dma_addr_t p_phys_next)
594{
595 struct qed_chain_next *p_next;
596 u32 size;
597
598 size = p_chain->elem_size * p_chain->usable_per_page;
599 p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
600
601 DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
602
603 p_next->next_virt = p_virt_next;
604}
605
606
607
608
609
610
611
612
613
614
/**
 * qed_chain_get_last_elem(): Return a pointer to the last usable
 * element of the chain, or NULL when no memory is attached yet.
 *
 * NEXT_PTR chains are walked link by link until the links wrap back to
 * the first page; SINGLE uses its only page; PBL looks up the last page
 * directly in the virtual-address table.
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
		break;
	}
	/* p_virt_addr now points at the start of the last page; step to
	 * its last usable element.
	 */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}
649
650
651
652
653
654
655
656static inline void qed_chain_set_prod(struct qed_chain *p_chain,
657 u32 prod_idx, void *p_prod_elem)
658{
659 if (is_chain_u16(p_chain))
660 p_chain->u.chain16.prod_idx = (u16) prod_idx;
661 else
662 p_chain->u.chain32.prod_idx = prod_idx;
663 p_chain->p_prod_elem = p_prod_elem;
664}
665
666
667
668
669
670
671static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
672{
673 u32 i, page_cnt;
674
675 if (p_chain->mode != QED_CHAIN_MODE_PBL)
676 return;
677
678 page_cnt = qed_chain_get_page_cnt(p_chain);
679
680 for (i = 0; i < page_cnt; i++)
681 memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
682 QED_CHAIN_PAGE_SIZE);
683}
684
685#endif
686