// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/log2.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

#include "hinic_hw_csr.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"

#define HINIC_EQS_WQ_NAME		"hinic_eqs"

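/*
 * Queue geometry helpers: an EQ of q_len elements of elem_size bytes each is
 * backed by num_pages DMA pages of pg_size bytes.
 */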
#define GET_EQ_NUM_PAGES(eq, pg_size)	\
		(ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))

#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size)	((pg_size) / (eq)->elem_size)

#define EQ_CONS_IDX_REG_ADDR(eq)	(((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
			HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))

#define EQ_PROD_IDX_REG_ADDR(eq)	(((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
			HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))

#define EQ_HI_PHYS_ADDR_REG(eq, pg_num)	(((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
			HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))

#define EQ_LO_PHYS_ADDR_REG(eq, pg_num)	(((eq)->type == HINIC_AEQ) ? \
			HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
			HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))

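/*
 * Map a linear element index to its page and the offset within that page.
 * The offset uses a bitwise AND, which is valid because init_eq() enforces
 * that num_elem_in_pg is a power of 2.
 */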
#define GET_EQ_ELEMENT(eq, idx)		\
		((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
		 (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))

#define GET_AEQ_ELEM(eq, idx)		((struct hinic_aeq_elem *) \
					GET_EQ_ELEMENT(eq, idx))

#define GET_CEQ_ELEM(eq, idx)		((u32 *) \
					GET_EQ_ELEMENT(eq, idx))

#define GET_CURR_AEQ_ELEM(eq)		GET_AEQ_ELEM(eq, (eq)->cons_idx)

#define GET_CURR_CEQ_ELEM(eq)		GET_CEQ_ELEM(eq, (eq)->cons_idx)

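/*
 * The HW takes the page size as log2 of 4KB units and the element size as
 * log2 of 32-byte units.
 */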
#define PAGE_IN_4K(page_size)		((page_size) >> 12)
#define EQ_SET_HW_PAGE_SIZE_VAL(eq)	(ilog2(PAGE_IN_4K((eq)->page_size)))

#define ELEMENT_SIZE_IN_32B(eq)		(((eq)->elem_size) >> 5)
#define EQ_SET_HW_ELEM_SIZE_VAL(eq)	(ilog2(ELEMENT_SIZE_IN_32B(eq)))

#define EQ_MAX_PAGES			8

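/* A CEQ element is a single 32-bit word: decode its type and data fields */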
#define CEQE_TYPE_SHIFT			23
#define CEQE_TYPE_MASK			0x7

#define CEQE_TYPE(ceqe)			(((ceqe) >> CEQE_TYPE_SHIFT) & \
					 CEQE_TYPE_MASK)

#define CEQE_DATA_MASK			0x3FFFFFF
#define CEQE_DATA(ceqe)			((ceqe) & CEQE_DATA_MASK)

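/*
 * Recover the parent aeqs/ceqs structure from a queue pointer: stepping back
 * q_id elements yields &aeq[0] / &ceq[0], which container_of() then maps to
 * the enclosing structure.
 */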
#define aeq_to_aeqs(eq)			\
		container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])

#define ceq_to_ceqs(eq)			\
		container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])

#define work_to_aeq_work(work)		\
		container_of(work, struct hinic_eq_work, work)

#define DMA_ATTR_AEQ_DEFAULT		0
#define DMA_ATTR_CEQ_DEFAULT		0

#define THRESH_CEQ_DEFAULT		0

enum eq_int_mode {
	EQ_INT_MODE_ARMED,
	EQ_INT_MODE_ALWAYS
};

enum eq_arm_state {
	EQ_NOT_ARMED,
	EQ_ARMED
};
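
/*
 * Callback state protocol: registration sets HINIC_EQE_ENABLED; the IRQ path
 * marks HINIC_EQE_RUNNING via cmpxchg() around each handler invocation;
 * unregistration clears ENABLED and spins until RUNNING drops.
 */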

/**
 * hinic_aeq_register_hw_cb - register an AEQ callback for a specific event
 * @aeqs: pointer to Async Event Queues
 * @event: aeq event to register the callback for
 * @handle: private data passed to the callback
 * @hwe_handler: callback function
 **/
void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
			      enum hinic_aeq_type event, void *handle,
			      void (*hwe_handler)(void *handle, void *data,
						  u8 size))
{
	struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];

	hwe_cb->hwe_handler = hwe_handler;
	hwe_cb->handle = handle;
	hwe_cb->hwe_state = HINIC_EQE_ENABLED;
}

/**
 * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for a specific event
 * @aeqs: pointer to Async Event Queues
 * @event: aeq event to unregister the callback for
 **/
void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
				enum hinic_aeq_type event)
{
	struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];

	hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;

	/* wait for any in-flight handler for this event to finish */
	while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
		schedule();

	hwe_cb->hwe_handler = NULL;
}

/**
 * hinic_ceq_register_cb - register a CEQ callback for a specific event
 * @ceqs: pointer to Completion Event Queues
 * @event: ceq event to register the callback for
 * @handle: private data passed to the callback
 * @handler: callback function
 **/
void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
			   enum hinic_ceq_type event, void *handle,
			   void (*handler)(void *handle, u32 ceqe_data))
{
	struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];

	ceq_cb->handler = handler;
	ceq_cb->handle = handle;
	ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
}

/**
 * hinic_ceq_unregister_cb - unregister the CEQ callback for a specific event
 * @ceqs: pointer to Completion Event Queues
 * @event: ceq event to unregister the callback for
 **/
void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
			     enum hinic_ceq_type event)
{
	struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];

	ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;

	/* wait for any in-flight handler for this event to finish */
	while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
		schedule();

	ceq_cb->handler = NULL;
}

/* XOR the eight 4-bit nibbles of val; the HW validates CI writes with it */
static u8 eq_cons_idx_checksum_set(u32 val)
{
	u8 checksum = 0;
	int idx;

	for (idx = 0; idx < 32; idx += 4)
		checksum ^= ((val >> idx) & 0xF);

	return (checksum & 0xF);
}

/**
 * eq_update_ci - update the HW cons idx of event queue
 * @eq: the event queue to update the cons idx for
 **/
static void eq_update_ci(struct hinic_eq *eq)
{
	u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);

	/* Read Modify Write */
	val = hinic_hwif_read_reg(eq->hwif, addr);

	val = HINIC_EQ_CI_CLEAR(val, IDX) &
	      HINIC_EQ_CI_CLEAR(val, WRAPPED) &
	      HINIC_EQ_CI_CLEAR(val, INT_ARMED) &
	      HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);

	val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
	       HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
	       HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);

	val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);

	hinic_hwif_write_reg(eq->hwif, addr, val);
}

/**
 * aeq_irq_handler - handler for the AEQ event
 * @eq: the Async Event Queue that received the event
 **/
static void aeq_irq_handler(struct hinic_eq *eq)
{
	struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
	struct hinic_hwif *hwif = aeqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_aeq_elem *aeqe_curr;
	struct hinic_hw_event_cb *hwe_cb;
	enum hinic_aeq_type event;
	unsigned long eqe_state;
	u32 aeqe_desc;
	int i, size;

	for (i = 0; i < eq->q_len; i++) {
		aeqe_curr = GET_CURR_AEQ_ELEM(eq);

		/* Data in HW is in Big endian Format */
		aeqe_desc = be32_to_cpu(aeqe_curr->desc);

		/* HW toggles the wrapped bit when it writes a new element,
		 * so an element is valid only while its bit differs from
		 * the driver's wrapped state
		 */
		if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
			break;

		event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
		if (event >= HINIC_MAX_AEQ_EVENTS) {
			dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
			return;
		}

		if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
			hwe_cb = &aeqs->hwe_cb[event];

			size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);

			eqe_state = cmpxchg(&hwe_cb->hwe_state,
					    HINIC_EQE_ENABLED,
					    HINIC_EQE_ENABLED |
					    HINIC_EQE_RUNNING);
			if ((eqe_state == HINIC_EQE_ENABLED) &&
			    (hwe_cb->hwe_handler))
				hwe_cb->hwe_handler(hwe_cb->handle,
						    aeqe_curr->data, size);
			else
				dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
					event);

			hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
		}

		eq->cons_idx++;

		if (eq->cons_idx == eq->q_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}
	}
}

/**
 * ceq_event_handler - handler for the ceq events
 * @ceqs: ceqs part of the chip
 * @ceqe: ceq element that describes the event
 **/
static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
{
	struct hinic_hwif *hwif = ceqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_ceq_cb *ceq_cb;
	enum hinic_ceq_type event;
	unsigned long eqe_state;

	event = CEQE_TYPE(ceqe);
	if (event >= HINIC_MAX_CEQ_EVENTS) {
		dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
		return;
	}

	ceq_cb = &ceqs->ceq_cb[event];

	eqe_state = cmpxchg(&ceq_cb->ceqe_state,
			    HINIC_EQE_ENABLED,
			    HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);

	if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler))
		ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
	else
		dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);

	ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
}

/**
 * ceq_irq_handler - handler for the CEQ event
 * @eq: the Completion Event Queue that received the event
 **/
static void ceq_irq_handler(struct hinic_eq *eq)
{
	struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
	u32 ceqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		ceqe = *(GET_CURR_CEQ_ELEM(eq));

		/* Data in HW is in Big endian Format */
		ceqe = be32_to_cpu(ceqe);

		/* HW toggles the wrapped bit when it writes a new element */
		if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
			break;

		ceq_event_handler(ceqs, ceqe);

		eq->cons_idx++;

		if (eq->cons_idx == eq->q_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}
	}
}

/**
 * eq_irq_handler - handler for the EQ event
 * @data: the Event Queue that received the event
 **/
static void eq_irq_handler(void *data)
{
	struct hinic_eq *eq = data;

	if (eq->type == HINIC_AEQ)
		aeq_irq_handler(eq);
	else if (eq->type == HINIC_CEQ)
		ceq_irq_handler(eq);

	eq_update_ci(eq);
}

/**
 * eq_irq_work - the work of the EQ that received the event
 * @work: the work struct that is associated with the EQ
 **/
static void eq_irq_work(struct work_struct *work)
{
	struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
	struct hinic_eq *aeq;

	aeq = aeq_work->data;
	eq_irq_handler(aeq);
}

/**
 * ceq_tasklet - the tasklet of the EQ that received the event
 * @ceq_data: the Completion Event Queue, cast from unsigned long
 **/
static void ceq_tasklet(unsigned long ceq_data)
{
	struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;

	eq_irq_handler(ceq);
}

/**
 * aeq_interrupt - aeq interrupt handler
 * @irq: irq number
 * @data: the Async Event Queue that collected the event
 **/
static irqreturn_t aeq_interrupt(int irq, void *data)
{
	struct hinic_eq_work *aeq_work;
	struct hinic_eq *aeq = data;
	struct hinic_aeqs *aeqs;

	/* clear resend timer cnt register */
	hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);

	aeq_work = &aeq->aeq_work;
	aeq_work->data = aeq;

	aeqs = aeq_to_aeqs(aeq);
	queue_work(aeqs->workq, &aeq_work->work);

	return IRQ_HANDLED;
}

/**
 * ceq_interrupt - ceq interrupt handler
 * @irq: irq number
 * @data: the Completion Event Queue that collected the event
 **/
static irqreturn_t ceq_interrupt(int irq, void *data)
{
	struct hinic_eq *ceq = data;

	/* clear resend timer cnt register */
	hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);

	tasklet_schedule(&ceq->ceq_tasklet);

	return IRQ_HANDLED;
}
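
/*
 * ctrl0 selects the interrupt routing for the queue (msix entry, DMA
 * attributes, PCI interface and interrupt mode); ctrl1 sets the queue
 * length and the page/element size encodings.
 */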
static void set_ctrl0(struct hinic_eq *eq)
{
	struct msix_entry *msix_entry = &eq->msix_entry;
	enum hinic_eq_type type = eq->type;
	u32 addr, val, ctrl0;

	if (type == HINIC_AEQ) {
		addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) &
		      HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
		      HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
		      HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);

		ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) |
			HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
			HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
					     PCI_INTF_IDX) |
			HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);

		val |= ctrl0;

		hinic_hwif_write_reg(eq->hwif, addr, val);
	} else if (type == HINIC_CEQ) {
		addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) &
		      HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
		      HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) &
		      HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
		      HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);

		ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) |
			HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) |
			HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
			HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
					     PCI_INTF_IDX) |
			HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);

		val |= ctrl0;

		hinic_hwif_write_reg(eq->hwif, addr, val);
	}
}

static void set_ctrl1(struct hinic_eq *eq)
{
	enum hinic_eq_type type = eq->type;
	u32 page_size_val, elem_size;
	u32 addr, val, ctrl1;

	if (type == HINIC_AEQ) {
		addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
		elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) &
		      HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) &
		      HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);

		ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) |
			HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
			HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		val |= ctrl1;

		hinic_hwif_write_reg(eq->hwif, addr, val);
	} else if (type == HINIC_CEQ) {
		addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);

		val = hinic_hwif_read_reg(eq->hwif, addr);

		val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
		      HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);

		ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
			HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		val |= ctrl1;

		hinic_hwif_write_reg(eq->hwif, addr, val);
	}
}

/**
 * set_eq_ctrls - setting eq's ctrl registers
 * @eq: the Event Queue for setting
 **/
static void set_eq_ctrls(struct hinic_eq *eq)
{
	set_ctrl0(eq);
	set_ctrl1(eq);
}

/**
 * aeq_elements_init - initialize all the elements in the aeq
 * @eq: the Async Event Queue
 * @init_val: value to initialize the elements with
 **/
static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	struct hinic_aeq_elem *aeqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		aeqe = GET_AEQ_ELEM(eq, i);
		aeqe->desc = cpu_to_be32(init_val);
	}

	wmb();	/* Write the initialization values */
}

/**
 * ceq_elements_init - initialize all the elements in the ceq
 * @eq: the Completion Event Queue
 * @init_val: value to initialize the elements with
 **/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	u32 *ceqe;
	int i;

	for (i = 0; i < eq->q_len; i++) {
		ceqe = GET_CEQ_ELEM(eq, i);
		*(ceqe) = cpu_to_be32(init_val);
	}

	wmb();	/* Write the initialization values */
}

/**
 * alloc_eq_pages - allocate the pages for the queue
 * @eq: the event queue
 *
 * Return 0 - Success, Negative - Failure
 **/
static int alloc_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwif;
	struct pci_dev *pdev = hwif->pdev;
	u32 init_val, addr, val;
	size_t addr_size;
	int err, pg;

	addr_size = eq->num_pages * sizeof(*eq->dma_addr);
	eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
	if (!eq->dma_addr)
		return -ENOMEM;

	addr_size = eq->num_pages * sizeof(*eq->virt_addr);
	eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
	if (!eq->virt_addr) {
		err = -ENOMEM;
		goto err_virt_addr_alloc;
	}

	for (pg = 0; pg < eq->num_pages; pg++) {
		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
						       eq->page_size,
						       &eq->dma_addr[pg],
						       GFP_KERNEL);
		if (!eq->virt_addr[pg]) {
			err = -ENOMEM;
			goto err_dma_alloc;
		}

		addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
		val = upper_32_bits(eq->dma_addr[pg]);

		hinic_hwif_write_reg(hwif, addr, val);

		addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
		val = lower_32_bits(eq->dma_addr[pg]);

		hinic_hwif_write_reg(hwif, addr, val);
	}

	/* mark all elements as not-yet-written by matching the driver's
	 * current wrapped state (the HW toggles the bit on each write)
	 */
	init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);

	if (eq->type == HINIC_AEQ)
		aeq_elements_init(eq, init_val);
	else if (eq->type == HINIC_CEQ)
		ceq_elements_init(eq, init_val);

	return 0;

err_dma_alloc:
	while (--pg >= 0)
		dma_free_coherent(&pdev->dev, eq->page_size,
				  eq->virt_addr[pg],
				  eq->dma_addr[pg]);

	devm_kfree(&pdev->dev, eq->virt_addr);

err_virt_addr_alloc:
	devm_kfree(&pdev->dev, eq->dma_addr);
	return err;
}

/**
 * free_eq_pages - free the pages of the queue
 * @eq: the Event Queue
 **/
static void free_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int pg;

	for (pg = 0; pg < eq->num_pages; pg++)
		dma_free_coherent(&pdev->dev, eq->page_size,
				  eq->virt_addr[pg],
				  eq->dma_addr[pg]);

	devm_kfree(&pdev->dev, eq->virt_addr);
	devm_kfree(&pdev->dev, eq->dma_addr);
}

/**
 * init_eq - initialize Event Queue
 * @eq: the event queue
 * @hwif: the HW interface of a PCI function device
 * @type: the type of the event queue, aeq or ceq
 * @q_id: Queue id number
 * @q_len: the number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @entry: msix entry associated with the event queue
 *
 * Return 0 - Success, Negative - Failure
 **/
static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
		   enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
		   struct msix_entry entry)
{
	struct pci_dev *pdev = hwif->pdev;
	int err;

	eq->hwif = hwif;
	eq->type = type;
	eq->q_id = q_id;
	eq->q_len = q_len;
	eq->page_size = page_size;

	/* Clear PI and CI, also clear the ARM bit */
	hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
	hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);

	eq->cons_idx = 0;
	eq->wrapped = 0;

	if (type == HINIC_AEQ) {
		eq->elem_size = HINIC_AEQE_SIZE;
	} else if (type == HINIC_CEQ) {
		eq->elem_size = HINIC_CEQE_SIZE;
	} else {
		dev_err(&pdev->dev, "Invalid EQ type\n");
		return -EINVAL;
	}

	eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
	eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);

	eq->msix_entry = entry;

	/* GET_EQ_ELEMENT() relies on num_elem_in_pg being a power of 2 */
	if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
		dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
		return -EINVAL;
	}

	if (eq->num_pages > EQ_MAX_PAGES) {
		dev_err(&pdev->dev, "too many pages for eq\n");
		return -EINVAL;
	}

	set_eq_ctrls(eq);
	eq_update_ci(eq);

	err = alloc_eq_pages(eq);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
		return err;
	}

	if (type == HINIC_AEQ) {
		struct hinic_eq_work *aeq_work = &eq->aeq_work;

		INIT_WORK(&aeq_work->work, eq_irq_work);
	} else if (type == HINIC_CEQ) {
		tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
			     (unsigned long)eq);
	}

	/* set the attributes of the msix entry */
	hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
			    HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
			    HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
			    HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
			    HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
			    HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);

	if (type == HINIC_AEQ)
		err = request_irq(entry.vector, aeq_interrupt, 0,
				  "hinic_aeq", eq);
	else if (type == HINIC_CEQ)
		err = request_irq(entry.vector, ceq_interrupt, 0,
				  "hinic_ceq", eq);

	if (err) {
		dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
		goto err_req_irq;
	}

	return 0;

err_req_irq:
	free_eq_pages(eq);
	return err;
}

/**
 * remove_eq - remove Event Queue
 * @eq: the event queue
 **/
static void remove_eq(struct hinic_eq *eq)
{
	struct msix_entry *entry = &eq->msix_entry;

	free_irq(entry->vector, eq);

	if (eq->type == HINIC_AEQ) {
		struct hinic_eq_work *aeq_work = &eq->aeq_work;

		cancel_work_sync(&aeq_work->work);
	} else if (eq->type == HINIC_CEQ) {
		tasklet_kill(&eq->ceq_tasklet);
	}

	free_eq_pages(eq);
}

/**
 * hinic_aeqs_init - initialize all the aeqs
 * @aeqs: pointer to Async eqs of the chip
 * @hwif: the HW interface of a PCI function device
 * @num_aeqs: number of AEQs
 * @q_len: number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @msix_entries: msix entries associated with the event queues
 *
 * Return 0 - Success, Negative - Failure
 **/
int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
		    int num_aeqs, u32 q_len, u32 page_size,
		    struct msix_entry *msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	int err, i, q_id;

	aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
	if (!aeqs->workq)
		return -ENOMEM;

	aeqs->hwif = hwif;
	aeqs->num_aeqs = num_aeqs;

	for (q_id = 0; q_id < num_aeqs; q_id++) {
		err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
			      page_size, msix_entries[q_id]);
		if (err) {
			dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
			goto err_init_aeq;
		}
	}

	return 0;

err_init_aeq:
	for (i = 0; i < q_id; i++)
		remove_eq(&aeqs->aeq[i]);

	destroy_workqueue(aeqs->workq);
	return err;
}

/**
 * hinic_aeqs_free - free all the aeqs
 * @aeqs: pointer to Async eqs of the chip
 **/
void hinic_aeqs_free(struct hinic_aeqs *aeqs)
{
	int q_id;

	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
		remove_eq(&aeqs->aeq[q_id]);

	destroy_workqueue(aeqs->workq);
}

/**
 * hinic_ceqs_init - init all the ceqs
 * @ceqs: ceqs part of the chip
 * @hwif: the HW interface of a PCI function device
 * @num_ceqs: number of CEQs
 * @q_len: number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @msix_entries: msix entries associated with the event queues
 *
 * Return 0 - Success, Negative - Failure
 **/
int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
		    int num_ceqs, u32 q_len, u32 page_size,
		    struct msix_entry *msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	int i, q_id, err;

	ceqs->hwif = hwif;
	ceqs->num_ceqs = num_ceqs;

	for (q_id = 0; q_id < num_ceqs; q_id++) {
		err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
			      page_size, msix_entries[q_id]);
		if (err) {
			dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
			goto err_init_ceq;
		}
	}

	return 0;

err_init_ceq:
	for (i = 0; i < q_id; i++)
		remove_eq(&ceqs->ceq[i]);

	return err;
}

/**
 * hinic_ceqs_free - free all the ceqs
 * @ceqs: ceqs part of the chip
 **/
void hinic_ceqs_free(struct hinic_ceqs *ceqs)
{
	int q_id;

	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
		remove_eq(&ceqs->ceq[q_id]);
}