// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/pci.h>
11#include <linux/device.h>
12#include <linux/slab.h>
13#include <linux/vmalloc.h>
14#include <linux/spinlock.h>
15#include <linux/sizes.h>
16#include <linux/atomic.h>
17#include <linux/log2.h>
18#include <linux/io.h>
19#include <linux/completion.h>
20#include <linux/err.h>
21#include <asm/byteorder.h>
22#include <asm/barrier.h>
23
24#include "hinic_common.h"
25#include "hinic_hw_if.h"
26#include "hinic_hw_eqs.h"
27#include "hinic_hw_mgmt.h"
28#include "hinic_hw_wqe.h"
29#include "hinic_hw_wq.h"
30#include "hinic_hw_cmdq.h"
31#include "hinic_hw_io.h"
32#include "hinic_hw_dev.h"
33
/* completion event queue element (CEQE) fields for the cmdq */
#define CMDQ_CEQE_TYPE_SHIFT			0

#define CMDQ_CEQE_TYPE_MASK			0x7

/* extract a named field from a cmdq CEQE data word */
#define CMDQ_CEQE_GET(val, member) \
			(((val) >> CMDQ_CEQE_##member##_SHIFT) \
			 & CMDQ_CEQE_##member##_MASK)

/* error-code field inside a completed WQE's status word */
#define CMDQ_WQE_ERRCODE_VAL_SHIFT	20

#define CMDQ_WQE_ERRCODE_VAL_MASK	0xF

/* extract a named field from a WQE status word */
#define CMDQ_WQE_ERRCODE_GET(val, member) \
			(((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
			 & CMDQ_WQE_ERRCODE_##member##_MASK)

/* doorbell offset is derived from the low byte of the producer index */
#define CMDQ_DB_PI_OFF(pi)		(((u16)LOWER_8_BITS(pi)) << 3)

#define CMDQ_DB_ADDR(db_base, pi)	((db_base) + CMDQ_DB_PI_OFF(pi))

/* the header is the first section of every cmdq WQE */
#define CMDQ_WQE_HEADER(wqe)		((struct hinic_cmdq_header *)(wqe))

/* HW busy bit in ctrl_info marks a WQE the HW has completed */
#define CMDQ_WQE_COMPLETED(ctrl_info)	\
			HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)

/* the first 8 bytes (the header) are written last -- see cmdq_wqe_fill() */
#define FIRST_DATA_TO_WRITE_LAST	sizeof(u64)

#define CMDQ_DB_OFF			SZ_2K

/* cmdq work queue geometry */
#define CMDQ_WQEBB_SIZE			64
#define CMDQ_WQE_SIZE			64
#define CMDQ_DEPTH			SZ_4K

#define CMDQ_WQ_PAGE_SIZE		SZ_4K

/* both WQE formats occupy a single 64-byte wqebb */
#define WQE_LCMD_SIZE			64
#define WQE_SCMD_SIZE			64

#define COMPLETE_LEN			3

/* timeout for waiting on a synchronous cmdq command completion */
#define CMDQ_TIMEOUT			1000

#define CMDQ_PFN(addr, page_size)	((addr) >> (ilog2(page_size)))

/* recover the containing cmdqs from one member of its cmdq[] array */
#define cmdq_to_cmdqs(cmdq)	container_of((cmdq) - (cmdq)->cmdq_type, \
					     struct hinic_cmdqs, cmdq[0])

#define cmdqs_to_func_to_io(cmdqs)	container_of(cmdqs, \
						     struct hinic_func_to_io, \
						     cmdqs)

/* WQE layouts: LCMD carries an SGE, SCMD carries inline (direct) data */
enum cmdq_wqe_type {
	WQE_LCMD_TYPE = 0,
	WQE_SCMD_TYPE = 1,
};

/* how a completion is reported: inline value or via an SGE buffer */
enum completion_format {
	COMPLETE_DIRECT = 0,
	COMPLETE_SGE = 1,
};

/* how the command payload is passed: via an SGE or inline (direct) */
enum data_format {
	DATA_SGE = 0,
	DATA_DIRECT = 1,
};

/* buffer-descriptor section length codes (HW encoding -- see
 * cmdq_wqe_size_from_bdlen() for the mapping to WQE sizes)
 */
enum bufdesc_len {
	BUFDESC_LCMD_LEN = 2,
	BUFDESC_SCMD_LEN = 3,
};

/* ctrl section length codes written into the WQE header */
enum ctrl_sect_len {
	CTRL_SECT_LEN = 1,
	CTRL_DIRECT_SECT_LEN = 2,
};

enum cmdq_scmd_type {
	CMDQ_SET_ARM_CMD = 2,
};

enum cmdq_cmd_type {
	CMDQ_CMD_SYNC_DIRECT_RESP = 0,
	CMDQ_CMD_SYNC_SGE_RESP = 1,
};

/* whether the WQE requests a completion event on the CEQ */
enum completion_request {
	NO_CEQ = 0,
	CEQ_SET = 1,
};
123
124
125
126
127
128
129
130
131int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
132 struct hinic_cmdq_buf *cmdq_buf)
133{
134 struct hinic_hwif *hwif = cmdqs->hwif;
135 struct pci_dev *pdev = hwif->pdev;
136
137 cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
138 &cmdq_buf->dma_addr);
139 if (!cmdq_buf->buf) {
140 dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
141 return -ENOMEM;
142 }
143
144 return 0;
145}
146
147
148
149
150
151
152void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
153 struct hinic_cmdq_buf *cmdq_buf)
154{
155 dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
156}
157
158static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
159{
160 unsigned int wqe_size = 0;
161
162 switch (len) {
163 case BUFDESC_LCMD_LEN:
164 wqe_size = WQE_LCMD_SIZE;
165 break;
166 case BUFDESC_SCMD_LEN:
167 wqe_size = WQE_SCMD_SIZE;
168 break;
169 }
170
171 return wqe_size;
172}
173
174static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
175 struct hinic_cmdq_buf *buf_out)
176{
177 struct hinic_sge_resp *sge_resp = &completion->sge_resp;
178
179 hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
180}
181
182static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
183 enum hinic_cmd_ack_type ack_type,
184 enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
185 enum completion_format complete_format,
186 enum data_format data_format,
187 enum bufdesc_len buf_len)
188{
189 struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
190 struct hinic_cmdq_wqe_scmd *wqe_scmd;
191 enum ctrl_sect_len ctrl_len;
192 struct hinic_ctrl *ctrl;
193 u32 saved_data;
194
195 if (data_format == DATA_SGE) {
196 wqe_lcmd = &wqe->wqe_lcmd;
197
198 wqe_lcmd->status.status_info = 0;
199 ctrl = &wqe_lcmd->ctrl;
200 ctrl_len = CTRL_SECT_LEN;
201 } else {
202 wqe_scmd = &wqe->direct_wqe.wqe_scmd;
203
204 wqe_scmd->status.status_info = 0;
205 ctrl = &wqe_scmd->ctrl;
206 ctrl_len = CTRL_DIRECT_SECT_LEN;
207 }
208
209 ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) |
210 HINIC_CMDQ_CTRL_SET(cmd, CMD) |
211 HINIC_CMDQ_CTRL_SET(mod, MOD) |
212 HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);
213
214 CMDQ_WQE_HEADER(wqe)->header_info =
215 HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
216 HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
217 HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
218 HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
219 HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
220 HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
221 HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);
222
223 saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
224 saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);
225
226 if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM))
227 CMDQ_WQE_HEADER(wqe)->saved_data |=
228 HINIC_SAVED_DATA_SET(1, ARM);
229 else
230 CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
231}
232
233static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
234 struct hinic_cmdq_buf *buf_in)
235{
236 hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
237}
238
239static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
240 void *buf_in, u32 in_size)
241{
242 struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
243
244 wqe_scmd->buf_desc.buf_len = in_size;
245 memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
246}
247
248static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
249 enum cmdq_cmd_type cmd_type,
250 struct hinic_cmdq_buf *buf_in,
251 struct hinic_cmdq_buf *buf_out, int wrapped,
252 enum hinic_cmd_ack_type ack_type,
253 enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
254{
255 struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
256 enum completion_format complete_format;
257
258 switch (cmd_type) {
259 case CMDQ_CMD_SYNC_SGE_RESP:
260 complete_format = COMPLETE_SGE;
261 cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
262 break;
263 case CMDQ_CMD_SYNC_DIRECT_RESP:
264 complete_format = COMPLETE_DIRECT;
265 wqe_lcmd->completion.direct_resp = 0;
266 break;
267 }
268
269 cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
270 prod_idx, complete_format, DATA_SGE,
271 BUFDESC_LCMD_LEN);
272
273 cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
274}
275
276static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
277 enum cmdq_cmd_type cmd_type,
278 void *buf_in, u16 in_size,
279 struct hinic_cmdq_buf *buf_out, int wrapped,
280 enum hinic_cmd_ack_type ack_type,
281 enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
282{
283 struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
284 enum completion_format complete_format;
285 struct hinic_cmdq_wqe_scmd *wqe_scmd;
286
287 wqe_scmd = &direct_wqe->wqe_scmd;
288
289 switch (cmd_type) {
290 case CMDQ_CMD_SYNC_SGE_RESP:
291 complete_format = COMPLETE_SGE;
292 cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
293 break;
294 case CMDQ_CMD_SYNC_DIRECT_RESP:
295 complete_format = COMPLETE_DIRECT;
296 wqe_scmd->completion.direct_resp = 0;
297 break;
298 }
299
300 cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
301 complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
302
303 cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
304}
305
/* Copy a prepared WQE into its hardware-visible ring slot.
 *
 * The first 8 bytes hold the WQE header (including the toggled-wrapped
 * flag).  Write the body first, then the header last with a write
 * barrier in between, so the hardware can never observe a valid header
 * in front of a partially written WQE.
 */
static void cmdq_wqe_fill(void *dst, void *src)
{
	/* everything after the header first */
	memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
	       CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);

	/* ensure the body is visible before the header is written */
	wmb();

	/* the header (first u64) is written last */
	*(u64 *)dst = *(u64 *)src;
}
315
316static void cmdq_fill_db(u32 *db_info,
317 enum hinic_cmdq_type cmdq_type, u16 prod_idx)
318{
319 *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
320 HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) |
321 HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
322 HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
323}
324
/* Ring the cmdq doorbell for @prod_idx.
 *
 * The low byte of the producer index selects the doorbell offset
 * (CMDQ_DB_ADDR); the rest of the index travels in the doorbell word.
 */
static void cmdq_set_db(struct hinic_cmdq *cmdq,
			enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
	u32 db_info;

	cmdq_fill_db(&db_info, cmdq_type, prod_idx);

	/* the device expects the doorbell in big-endian format */
	db_info = cpu_to_be32(db_info);

	/* make sure the WQE writes are visible before ringing the doorbell */
	wmb();

	writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
}
339
340static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
341 enum hinic_mod_type mod, u8 cmd,
342 struct hinic_cmdq_buf *buf_in,
343 u64 *resp)
344{
345 struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
346 u16 curr_prod_idx, next_prod_idx;
347 int errcode, wrapped, num_wqebbs;
348 struct hinic_wq *wq = cmdq->wq;
349 struct hinic_hw_wqe *hw_wqe;
350 struct completion done;
351
352
353 spin_lock_bh(&cmdq->cmdq_lock);
354
355
356 hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
357 if (IS_ERR(hw_wqe)) {
358 spin_unlock_bh(&cmdq->cmdq_lock);
359 return -EBUSY;
360 }
361
362 curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
363
364 wrapped = cmdq->wrapped;
365
366 num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
367 next_prod_idx = curr_prod_idx + num_wqebbs;
368 if (next_prod_idx >= wq->q_depth) {
369 cmdq->wrapped = !cmdq->wrapped;
370 next_prod_idx -= wq->q_depth;
371 }
372
373 cmdq->errcode[curr_prod_idx] = &errcode;
374
375 init_completion(&done);
376 cmdq->done[curr_prod_idx] = &done;
377
378 cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
379 wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
380 curr_prod_idx);
381
382
383 hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);
384
385
386 cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
387
388 cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
389
390 spin_unlock_bh(&cmdq->cmdq_lock);
391
392 if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
393 spin_lock_bh(&cmdq->cmdq_lock);
394
395 if (cmdq->errcode[curr_prod_idx] == &errcode)
396 cmdq->errcode[curr_prod_idx] = NULL;
397
398 if (cmdq->done[curr_prod_idx] == &done)
399 cmdq->done[curr_prod_idx] = NULL;
400
401 spin_unlock_bh(&cmdq->cmdq_lock);
402
403 return -ETIMEDOUT;
404 }
405
406 smp_rmb();
407
408 if (resp) {
409 struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;
410
411 *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
412 }
413
414 if (errcode != 0)
415 return -EFAULT;
416
417 return 0;
418}
419
/* Post a "set arm" small-command WQE on the cmdq (fire-and-forget; no
 * completion is awaited here).
 *
 * NOTE(review): uses plain spin_lock while the sync path uses
 * spin_lock_bh -- presumably this is only called from the ceq handler
 * (softirq) context; confirm no process-context callers exist.
 */
static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
			    u16 in_size)
{
	struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
	u16 curr_prod_idx, next_prod_idx;
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_hw_wqe *hw_wqe;
	int wrapped, num_wqebbs;

	/* serialize producers on this cmdq */
	spin_lock(&cmdq->cmdq_lock);

	hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
	if (IS_ERR(hw_wqe)) {
		spin_unlock(&cmdq->cmdq_lock);
		return -EBUSY;
	}

	curr_cmdq_wqe = &hw_wqe->cmdq_wqe;

	wrapped = cmdq->wrapped;

	/* advance the producer index, toggling the wrap flag when the
	 * ring wraps around
	 */
	num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
	next_prod_idx = curr_prod_idx + num_wqebbs;
	if (next_prod_idx >= wq->q_depth) {
		cmdq->wrapped = !cmdq->wrapped;
		next_prod_idx -= wq->q_depth;
	}

	cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
			    in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
			    HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);

	/* the device expects the WQE in big-endian format */
	hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);

	/* body first, header last -- hands the WQE to the hardware */
	cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);

	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

	spin_unlock(&cmdq->cmdq_lock);
	return 0;
}
465
466static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
467{
468 if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
469 return -EINVAL;
470
471 return 0;
472}
473
474
475
476
477
478
479
480
481
482
483
484int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
485 enum hinic_mod_type mod, u8 cmd,
486 struct hinic_cmdq_buf *buf_in, u64 *resp)
487{
488 struct hinic_hwif *hwif = cmdqs->hwif;
489 struct pci_dev *pdev = hwif->pdev;
490 int err;
491
492 err = cmdq_params_valid(buf_in);
493 if (err) {
494 dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
495 return err;
496 }
497
498 return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
499 mod, cmd, buf_in, resp);
500}
501
502
503
504
505
506
507
508
509
510int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
511 enum hinic_set_arm_qtype q_type, u32 q_id)
512{
513 struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
514 struct hinic_hwif *hwif = cmdqs->hwif;
515 struct pci_dev *pdev = hwif->pdev;
516 struct hinic_cmdq_arm_bit arm_bit;
517 int err;
518
519 arm_bit.q_type = q_type;
520 arm_bit.q_id = q_id;
521
522 err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
523 if (err) {
524 dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
525 return err;
526 }
527
528 return 0;
529}
530
/* Return a completed WQE to driver ownership by zeroing its ctrl word
 * (which carries the HW busy bit the hardware set on completion).  The
 * trailing wmb() makes the clear visible before the caller advances the
 * consumer index via hinic_put_wqe().
 */
static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
				   struct hinic_cmdq_wqe *wqe)
{
	u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
	unsigned int bufdesc_len, wqe_size;
	struct hinic_ctrl *ctrl;

	/* the ctrl section sits at a different offset in LCMD (SGE data)
	 * vs SCMD (direct data) layouts; derive the layout from the
	 * buffer-descriptor length stored in the header
	 */
	bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
	wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
	if (wqe_size == WQE_LCMD_SIZE) {
		struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;

		ctrl = &wqe_lcmd->ctrl;
	} else {
		struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
		struct hinic_cmdq_wqe_scmd *wqe_scmd;

		wqe_scmd = &direct_wqe->wqe_scmd;
		ctrl = &wqe_scmd->ctrl;
	}

	/* clears the HW busy bit along with the rest of the ctrl word */
	ctrl->ctrl_info = 0;

	/* ensure the clear is visible before the ci update that follows */
	wmb();
}
557
558
559
560
561
562
563
564
565static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
566 struct hinic_cmdq_wqe *wqe)
567{
568 struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
569 struct hinic_cmdq_wqe_scmd *wqe_scmd;
570 struct hinic_ctrl *ctrl;
571 u32 ctrl_info;
572
573 wqe_scmd = &direct_wqe->wqe_scmd;
574 ctrl = &wqe_scmd->ctrl;
575 ctrl_info = be32_to_cpu(ctrl->ctrl_info);
576
577
578 if (!CMDQ_WQE_COMPLETED(ctrl_info))
579 return -EBUSY;
580
581 clear_wqe_complete_bit(cmdq, wqe);
582
583 hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
584 return 0;
585}
586
587static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
588 int errcode)
589{
590 if (cmdq->errcode[prod_idx])
591 *cmdq->errcode[prod_idx] = errcode;
592}
593
/* Completion-path counterpart of cmdq_sync_cmd_direct_resp(): store the
 * error code for the waiter registered at this index and wake it.  The
 * cmdq_lock serializes against the waiter's timeout path, which may be
 * clearing the same errcode/done slots concurrently.
 */
static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
				  int errcode)
{
	/* the slot index the sender registered under (its producer index)
	 * equals the consumer index this completion was dequeued at
	 */
	u16 prod_idx = cons_idx;

	spin_lock(&cmdq->cmdq_lock);
	cmdq_update_errcode(cmdq, prod_idx, errcode);

	/* make sure the errcode write is visible before waking the waiter */
	wmb();

	if (cmdq->done[prod_idx])
		complete(cmdq->done[prod_idx]);
	spin_unlock(&cmdq->cmdq_lock);
}
614
615static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
616 struct hinic_cmdq_wqe *cmdq_wqe)
617{
618 struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
619 struct hinic_status *status = &wqe_lcmd->status;
620 struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
621 int errcode;
622
623 if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
624 return -EBUSY;
625
626 errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
627
628 cmdq_sync_cmd_handler(cmdq, ci, errcode);
629
630 clear_wqe_complete_bit(cmdq, cmdq_wqe);
631 hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
632 return 0;
633}
634
/**
 * cmdq_ceq_handler - cmdq completion event handler
 * @handle: private data for the handler (the cmdqs)
 * @ceqe_data: ceq element data; carries the cmdq type
 **/
static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
{
	enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
	struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
	struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
	struct hinic_cmdq_header *header;
	struct hinic_hw_wqe *hw_wqe;
	int err, set_arm = 0;
	u32 saved_data;
	u16 ci;

	/* drain completed WQEs: peek the head at SCMD size first and use
	 * the ARM flag stashed in saved_data to decide whether it is a
	 * "set arm" WQE or a regular command WQE
	 */
	while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
		saved_data = be32_to_cpu(header->saved_data);

		if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
			/* an arm WQE just completed, no need to re-arm */
			set_arm = 0;

			if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
				break;
		} else {
			set_arm = 1;

			/* re-read the same head at full LCMD size */
			hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
			if (IS_ERR(hw_wqe))
				break;

			if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
				break;
		}
	}

	/* post a new "set arm" command so further completions keep raising
	 * events.  NOTE(review): set_arm reflects only the last iteration;
	 * presumably intentional (only one re-arm needed per event) --
	 * confirm against the device's arming semantics.
	 */
	if (set_arm) {
		struct hinic_hwif *hwif = cmdqs->hwif;
		struct pci_dev *pdev = hwif->pdev;

		err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
		if (err)
			dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
	}
}
686
687
688
689
690
691
692
693static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
694 struct hinic_cmdq *cmdq,
695 struct hinic_cmdq_pages *cmdq_pages)
696{
697 struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
698 u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
699 struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
700 struct hinic_wq *wq = cmdq->wq;
701
702
703 wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
704
705 pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
706
707 ctxt_info->curr_wqe_page_pfn =
708 HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
709 HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
710 HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
711 HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
712 HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
713
714
715 cmdq_first_block_paddr = cmdq_pages->page_paddr;
716
717 pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
718
719 ctxt_info->wq_block_pfn =
720 HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
721 HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
722
723 cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
724 cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
725}
726
727
728
729
730
731
732
733
734
735
736static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
737 enum hinic_cmdq_type q_type, void __iomem *db_area)
738{
739 int err;
740
741 cmdq->wq = wq;
742 cmdq->cmdq_type = q_type;
743 cmdq->wrapped = 1;
744
745 spin_lock_init(&cmdq->cmdq_lock);
746
747 cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
748 if (!cmdq->done)
749 return -ENOMEM;
750
751 cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
752 wq->q_depth));
753 if (!cmdq->errcode) {
754 err = -ENOMEM;
755 goto err_errcode;
756 }
757
758 cmdq->db_base = db_area + CMDQ_DB_OFF;
759 return 0;
760
761err_errcode:
762 vfree(cmdq->done);
763 return err;
764}
765
766
767
768
769
770static void free_cmdq(struct hinic_cmdq *cmdq)
771{
772 vfree(cmdq->errcode);
773 vfree(cmdq->done);
774}
775
/**
 * init_cmdqs_ctxt - init all cmdqs and write their contexts to the HW
 * @hwdev: the NIC HW device
 * @cmdqs: cmdqs to write the ctxts for
 * @db_area: db_area for all the cmdqs
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
			   struct hinic_cmdqs *cmdqs, void __iomem **db_area)
{
	struct hinic_hwif *hwif = hwdev->hwif;
	enum hinic_cmdq_type type, cmdq_type;
	struct hinic_cmdq_ctxt *cmdq_ctxts;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_pfhwdev *pfhwdev;
	size_t cmdq_ctxts_size;
	int err;

	/* only PF/PPF functions may program cmdq contexts via mgmt msgs */
	if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
		dev_err(&pdev->dev, "Unsupported PCI function type\n");
		return -EINVAL;
	}

	cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
	cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
	if (!cmdq_ctxts)
		return -ENOMEM;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	/* first pass: init every cmdq and prepare its HW context */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		err = init_cmdq(&cmdqs->cmdq[cmdq_type],
				&cmdqs->saved_wqs[cmdq_type], cmdq_type,
				db_area[cmdq_type]);
		if (err) {
			dev_err(&pdev->dev, "Failed to initialize cmdq\n");
			goto err_init_cmdq;
		}

		cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
				     &cmdqs->cmdq[cmdq_type],
				     &cmdqs->cmdq_pages);
	}

	/* second pass: write the prepared contexts to the HW */
	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
					HINIC_COMM_CMD_CMDQ_CTXT_SET,
					&cmdq_ctxts[cmdq_type],
					sizeof(cmdq_ctxts[cmdq_type]),
					NULL, NULL, HINIC_MGMT_MSG_SYNC);
		if (err) {
			dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
				cmdq_type);
			goto err_write_cmdq_ctxt;
		}
	}

	devm_kfree(&pdev->dev, cmdq_ctxts);
	return 0;

err_write_cmdq_ctxt:
	/* every cmdq was initialized; make the loop below free all of them */
	cmdq_type = HINIC_MAX_CMDQ_TYPES;

err_init_cmdq:
	/* free only the cmdqs initialized before the failure (cmdq_type is
	 * the exclusive upper bound of successfully initialized queues)
	 */
	for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
		free_cmdq(&cmdqs->cmdq[type]);

	devm_kfree(&pdev->dev, cmdq_ctxts);
	return err;
}
850
851
852
853
854
855
856
857
858
859int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
860 void __iomem **db_area)
861{
862 struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
863 struct pci_dev *pdev = hwif->pdev;
864 struct hinic_hwdev *hwdev;
865 size_t saved_wqs_size;
866 u16 max_wqe_size;
867 int err;
868
869 cmdqs->hwif = hwif;
870 cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
871 HINIC_CMDQ_BUF_SIZE,
872 HINIC_CMDQ_BUF_SIZE, 0);
873 if (!cmdqs->cmdq_buf_pool)
874 return -ENOMEM;
875
876 saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
877 cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
878 if (!cmdqs->saved_wqs) {
879 err = -ENOMEM;
880 goto err_saved_wqs;
881 }
882
883 max_wqe_size = WQE_LCMD_SIZE;
884 err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
885 HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
886 CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
887 if (err) {
888 dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
889 goto err_cmdq_wqs;
890 }
891
892 hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
893 err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
894 if (err) {
895 dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
896 goto err_cmdq_ctxt;
897 }
898
899 hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
900 cmdq_ceq_handler);
901 return 0;
902
903err_cmdq_ctxt:
904 hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
905 HINIC_MAX_CMDQ_TYPES);
906
907err_cmdq_wqs:
908 devm_kfree(&pdev->dev, cmdqs->saved_wqs);
909
910err_saved_wqs:
911 dma_pool_destroy(cmdqs->cmdq_buf_pool);
912 return err;
913}
914
915
916
917
918
919void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
920{
921 struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
922 struct hinic_hwif *hwif = cmdqs->hwif;
923 struct pci_dev *pdev = hwif->pdev;
924 enum hinic_cmdq_type cmdq_type;
925
926 hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
927
928 cmdq_type = HINIC_CMDQ_SYNC;
929 for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
930 free_cmdq(&cmdqs->cmdq[cmdq_type]);
931
932 hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
933 HINIC_MAX_CMDQ_TYPES);
934
935 devm_kfree(&pdev->dev, cmdqs->saved_wqs);
936
937 dma_pool_destroy(cmdqs->cmdq_buf_pool);
938}
939