/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

enum qed_chain_mode {
	/* Each page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; no next pointer is needed */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list [PBL] */
	QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};

enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
	struct regpair next_phys;
	void *next_virt;
};

struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};

struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};

struct qed_chain_ext_pbl {
	dma_addr_t p_pbl_phys;
	void *p_pbl_virt;
};

struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16 prod_idx;
	u16 cons_idx;
};

struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32 prod_idx;
	u32 cons_idx;
};

struct qed_chain {
	/* Fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */

	/* Point to next element to produce/consume */
	void *p_prod_elem;
	void *p_cons_elem;

	/* Fastpath portions of the PBL [if exists] */
	struct {
		/* Table for keeping the virtual addresses of the chain
		 * pages, respectively to the physical addresses in the
		 * pbl table.
		 */
		void **pp_virt_addr_tbl;

		union {
			struct qed_chain_pbl_u16 u16;
			struct qed_chain_pbl_u32 u32;
		} c;
	} pbl;

	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	/* Capacity counts only usable elements */
	u32 capacity;
	u32 page_cnt;

	enum qed_chain_mode mode;

	/* Elements information for fast calculations */
	u16 elem_per_page;
	u16 elem_per_page_mask;
	u16 elem_size;
	u16 next_page_mask;
	u16 usable_per_page;
	u8 elem_unusable;

	u8 cnt_type;

	/* Slowpath of the chain - required for initialization and
	 * destruction, but isn't involved in regular functionality.
	 */

	/* Base address of a pre-allocated buffer for pbl */
	struct {
		dma_addr_t p_phys_table;
		void *p_virt_table;
	} pbl_sp;

	/* Address of first page of the chain - the address is required
	 * for fastpath operation [consume/produce] but only for the
	 * SINGLE flavour which isn't considered fastpath [== SPQ].
	 */
	void *p_virt_addr;
	dma_addr_t p_phys_addr;

	/* Total number of elements [for entire chain] */
	u32 size;

	u8 intended_use;

	bool b_external_pbl;
};

#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
#define QED_CHAIN_PAGE_SIZE		(0x1000)
#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	 \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?		 \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
		   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) -     \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
	return p_chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
	return p_chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
{
	return p_chain->u.chain32.cons_idx;
}

/* The number of free elements is the chain's capacity minus the cyclic
 * distance between the producer and consumer indices; for next-pointer
 * chains, the unusable [next-pointer] elements already skipped by the
 * producer but not yet by the consumer must not be counted as used.
 */
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	used = (u16) (((u32)0x10000 +
		       (u32)p_chain->u.chain16.prod_idx) -
		      (u32)p_chain->u.chain16.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
		    p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

	return (u16)(p_chain->capacity - used);
}

static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
	u32 used;

	used = (u32) (((u64)0x100000000ULL +
		       (u64)p_chain->u.chain32.prod_idx) -
		      (u64)p_chain->u.chain32.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
		    p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

	return p_chain->capacity - used;
}

static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
	return p_chain->usable_per_page;
}

static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
	return p_chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
	return p_chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
	return p_chain->pbl_sp.p_phys_table;
}

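/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next-element pointer across a page boundary, according to
 * the chain mode: follow the next pointer for NEXT_PTR chains, wrap to
 * the start for SINGLE-page chains, or look up the next page in the
 * virtual address table for PBL chains.
 *
 * @param p_chain
 * @param p_next_elem	in/out pointer to the current element
 * @param idx_to_inc	prod/cons index to adjust past unusable elements
 * @param page_to_inc	prod/cons page index to advance [PBL only]
 */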
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		*p_next_elem = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}

#define is_unusable_idx(p, idx)	\
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx)				 \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)			 \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define test_and_skip(p, idx)						  \
	do {								  \
		if (is_chain_u16(p)) {					  \
			if (is_unusable_idx(p, idx))			  \
				(p)->u.chain16.idx += (p)->elem_unusable; \
		} else {						  \
			if (is_unusable_idx_u32(p, idx))		  \
				(p)->u.chain32.idx += (p)->elem_unusable; \
		}							  \
	} while (0)

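/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previous produced elements are now consumed.
 *
 * @param p_chain
 */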
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx++;
	else
		p_chain->u.chain32.cons_idx++;
	test_and_skip(p_chain, cons_idx);
}

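/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It's the
 * driver's responsibility to validate that the chain has capacity for a
 * new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next element
 */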
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}

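/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in the chain
 *
 * @param p_chain
 *
 * @return number of usable BDs [for the entire chain]
 */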
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}

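/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments producers so they could be written to FW.
 *
 * @param p_chain
 */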
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	test_and_skip(p_chain, prod_idx);
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx++;
	else
		p_chain->u.chain32.prod_idx++;
}

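/**
 * @brief qed_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different
 * source (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */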
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}

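/**
 * @brief qed_chain_reset -
 *
 * Resets the chain to its start state
 *
 * @param p_chain	pointer to a previously allocated chain
 */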
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons
		 * page's indices, to avoid unnecessary page advancing on
		 * the first call to qed_chain_produce/consume. Instead,
		 * the indices will be advanced to page_cnt and then will
		 * be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;

	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing */
		break;
	}
}

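/**
 * @brief qed_chain_init_params -
 *
 * Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */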
static inline void qed_chain_init_params(struct qed_chain *p_chain,
					 u32 page_cnt,
					 u8 elem_size,
					 enum qed_chain_use_mode intended_use,
					 enum qed_chain_mode mode,
					 enum qed_chain_cnt_type cnt_type)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = NULL;
	p_chain->p_phys_addr = 0;
	p_chain->elem_size = elem_size;
	p_chain->intended_use = (u8)intended_use;
	p_chain->mode = mode;
	p_chain->cnt_type = (u8)cnt_type;

	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	p_chain->page_cnt = page_cnt;
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;

	p_chain->pbl_sp.p_phys_table = 0;
	p_chain->pbl_sp.p_virt_table = NULL;
	p_chain->pbl.pp_virt_addr_tbl = NULL;
}

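/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr	virtual address of allocated buffer's beginning
 * @param p_phys_addr	physical address of allocated buffer's beginning
 */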
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
				      void *p_virt_addr, dma_addr_t p_phys_addr)
{
	p_chain->p_virt_addr = p_virt_addr;
	p_chain->p_phys_addr = p_phys_addr;
}

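/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl	virtual address of the pbl table
 * @param p_phys_pbl	physical address of the pbl table
 * @param pp_virt_addr_tbl
 *			pointer to a pre-allocated side table which will
 *			hold the virtual addresses of the chain pages
 */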
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
					  void *p_virt_pbl,
					  dma_addr_t p_phys_pbl,
					  void **pp_virt_addr_tbl)
{
	p_chain->pbl_sp.p_phys_table = p_phys_pbl;
	p_chain->pbl_sp.p_virt_table = p_virt_pbl;
	p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}

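/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr	virtual address of a chain page of which the next
 *			pointer element is initialized
 * @param p_virt_next	virtual address of the next chain page
 * @param p_phys_next	physical address of the next chain page
 */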
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
			     void *p_virt_curr,
			     void *p_virt_next, dma_addr_t p_phys_next)
{
	struct qed_chain_next *p_next;
	u32 size;

	size = p_chain->elem_size * p_chain->usable_per_page;
	p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

	DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

	p_next->next_virt = p_virt_next;
}

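/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */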
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
		break;
	}
	/* p_virt_addr points at this stage to the last page of the chain */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}

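/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */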
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u32 prod_idx, void *p_prod_elem)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx = (u16) prod_idx;
	else
		p_chain->u.chain32.prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}

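/**
 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */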
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
	u32 i, page_cnt;

	if (p_chain->mode != QED_CHAIN_MODE_PBL)
		return;

	page_cnt = qed_chain_get_page_cnt(p_chain);

	for (i = 0; i < page_cnt; i++)
		memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
		       QED_CHAIN_PAGE_SIZE);
}

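/* Illustrative usage sketch, not part of the API: a producer-side flow
 * checks for room, produces an element, and fills it. The helper name
 * and its error handling are hypothetical; the chain is assumed to have
 * been allocated and initialized by the qed core beforehand, and bd_size
 * must not exceed the chain's elem_size.
 *
 *	static int example_post_bd(struct qed_chain *p_chain,
 *				   const void *bd, u16 bd_size)
 *	{
 *		void *p_elem;
 *
 *		if (!qed_chain_get_elem_left(p_chain))
 *			return -EBUSY;
 *
 *		p_elem = qed_chain_produce(p_chain);
 *		memcpy(p_elem, bd, bd_size);
 *
 *		return 0;
 *	}
 *
 * Once the consumer [e.g. FW] reports that elements were processed,
 * qed_chain_return_produced() should be called once per completed
 * element.
 */
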
#endif