// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

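/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */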
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

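/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */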
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

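/**
 * ice_check_sq_alive - check if the Send Queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */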
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

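/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */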
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

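/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */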
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

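/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */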
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

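/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */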
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

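/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */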
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

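/**
 * ice_cfg_cq_regs - setup the Control queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific Control queue ring
 * @num_entries: number of queue entries
 *
 * Clear head/tail, program the base address and length registers for the
 * given control queue ring, and verify that the base address was accepted.
 */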
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

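/**
 * ice_cfg_sq_regs - setup the Control Transmit Queue (ATQ) registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */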
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

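/**
 * ice_cfg_rq_regs - setup the Control Receive Queue (ARQ) registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */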
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	}								\
									\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
									\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

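/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */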
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

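/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */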
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

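/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */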
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		/* queue already shutdown */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

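/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load, 'false' if not.
 */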
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

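/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */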
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		/* queue already shutdown */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

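/**
 * ice_init_check_adminq - Check version for Admin Queue to know if its alive
 * @hw: pointer to the hardware structure
 */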
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

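/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */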
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

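/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */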
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

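/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues,
 * such as in response to a reset event.
 */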
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

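/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */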
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

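/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */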
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

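/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */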
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

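/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */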
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

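/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * later restart control queues, such as in response to a reset event.
 */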
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

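/**
 * ice_clean_sq - cleans the Admin Send Queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free descriptors
 */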
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

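/**
 * ice_debug_cq - dump control queue descriptor for debugging
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */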
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
	}
}

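/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */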
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

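/**
 * ice_sq_send_cmd - send command to a Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */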
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

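/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */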
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}

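/**
 * ice_clean_rq_elem - clean one element from the Receive Queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */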
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode),
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}