#include "ice_common.h"

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * Set up the register offsets and masks for the PF admin queue (ATQ/ARQ).
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	cq->sq.head = PF_FW_ATQH;
	cq->sq.tail = PF_FW_ATQT;
	cq->sq.len = PF_FW_ATQLEN;
	cq->sq.bah = PF_FW_ATQBAH;
	cq->sq.bal = PF_FW_ATQBAL;
	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
	cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
	cq->sq.head_mask = PF_FW_ATQH_ATQH_M;

	cq->rq.head = PF_FW_ARQH;
	cq->rq.tail = PF_FW_ARQT;
	cq->rq.len = PF_FW_ARQLEN;
	cq->rq.bah = PF_FW_ARQBAH;
	cq->rq.bal = PF_FW_ARQBAL;
	cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * Set up the register offsets and masks for the PF mailbox queue.
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	cq->sq.head = PF_MBX_ATQH;
	cq->sq.tail = PF_MBX_ATQT;
	cq->sq.len = PF_MBX_ATQLEN;
	cq->sq.bah = PF_MBX_ATQBAH;
	cq->sq.bal = PF_MBX_ATQBAL;
	cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
	cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
	cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;

	cq->rq.head = PF_MBX_ARQH;
	cq->rq.tail = PF_MBX_ARQT;
	cq->rq.len = PF_MBX_ARQLEN;
	cq->rq.bah = PF_MBX_ARQBAH;
	cq->rq.bal = PF_MBX_ARQBAL;
	cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
	cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
	cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
}

/**
 * ice_check_sq_alive - check Admin/Mailbox send queue is alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue length register reports the queue as
 * enabled with the expected number of entries, false otherwise.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Frees the descriptor ring only; the posted send buffers and command
 * details array must be freed separately (see ice_free_sq_bufs()).
 */
static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
			   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
	cq->sq.desc_buf.va = NULL;
	cq->sq.desc_buf.pa = 0;
	cq->sq.desc_buf.size = 0;
}

/**
 * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Frees the descriptor ring only; the posted receive buffers must be
 * freed separately (see ice_free_rq_bufs()).
 */
static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
			   cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
	cq->rq.desc_buf.va = NULL;
	cq->rq.desc_buf.pa = 0;
	cq->rq.desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* Allocate the buffer info array first, then the mapped buffers
	 * used for event processing.
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptor for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* the buffer size is carried in each descriptor's datalen
		 * field rather than a dedicated register
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* allocate the buffer info array first */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_free_rq_bufs - Free ARQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* free DMA buffers posted to the receive ring */
	for (i = 0; i < cq->num_rq_entries; i++) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}

	/* free the buffer info list */
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
}

/**
 * ice_free_sq_bufs - Free ATQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* only unmap if the address is non NULL */
	for (i = 0; i < cq->num_sq_entries; i++)
		if (cq->sq.r.sq_bi[i].pa) {
			dmam_free_coherent(ice_hw_to_dev(hw),
					   cq->sq.r.sq_bi[i].size,
					   cq->sq.r.sq_bi[i].va,
					   cq->sq.r.sq_bi[i].pa);
			cq->sq.r.sq_bi[i].va = NULL;
			cq->sq.r.sq_bi[i].pa = 0;
			cq->sq.r.sq_bi[i].size = 0;
		}

	/* free the command details array */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);

	/* free the buffer info list */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
}

/**
 * ice_cfg_sq_regs - setup the Control Transmit Queue (ATQ) registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue.
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);

	/* set starting point */
	wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
	wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
	wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->sq.bal);
	if (reg != lower_32_bits(cq->sq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_rq_regs - setup the Control Receive Queue (ARQ) registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue.
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);

	/* set starting point */
	wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
	wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
	wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->rq.bal);
	if (reg != lower_32_bits(cq->rq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_init_sq - main initialization routine for the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_sq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize receive side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Control Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_rq_ring(hw, cq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue.
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		/* already shutdown */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop hardware processing of the queue */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ice_free_sq_bufs(hw, cq);
	ice_free_ctrlq_sq_ring(hw, cq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load, 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
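		/* Major versions match; minor versions may differ by up to
		 * two before an informational message is printed. The driver
		 * still loads in either case.
		 */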
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown the Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue.
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		/* already shutdown */
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop hardware processing of the queue */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ice_free_rq_bufs(hw, cq);
	ice_free_ctrlq_rq_ring(hw, cq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check the FW API version on the Admin Queue
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	if (cq->rq.count) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}
	if (cq->sq.count) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		goto init_ctrlq_destroy_locks;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
init_ctrlq_destroy_locks:
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status ret_code;

	/* Init FW admin queue */
	ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
	if (ret_code)
		return ret_code;

	ret_code = ice_init_check_adminq(hw);
	if (ret_code)
		return ret_code;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
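		/* send the queue shutdown command to firmware only if the
		 * admin send queue is still alive
		 */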
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	if (cq->sq.count) {
		ice_shutdown_sq(hw, cq);
		mutex_destroy(&cq->sq_lock);
	}
	if (cq->rq.count) {
		ice_shutdown_rq(hw, cq);
		mutex_destroy(&cq->rq_lock);
	}
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF mailbox queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_clean_sq - cleans the send side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free descriptors
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_sq_done - check if the Control Send Queue (ATQ) is done
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * send queue, false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* the queue is done when the head register has caught up with
	 * next_to_use
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to a Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It posts the command,
 * polls for completion and copies back the response.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Clean the send queue to reclaim descriptors already processed by
	 * firmware; ice_clean_sq() returns the number of free descriptors.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}
	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		mdelay(1);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command completed with error 0x%x\n",
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send Queue Writeback timeout.\n");
		status = ICE_ERR_AQ_TIMEOUT;
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode to set in the descriptor
 *
 * Fill the descriptor with default values for a direct command.
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem - clean one element from the Control Receive Queue (ARQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * Cleans one Control Receive Queue element and returns its contents
 * through 'e'. It can also return how many events are left to process
 * through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event received with error 0x%x\n",
			  cq->rq_last_status);
	}
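	/* copy the descriptor into the caller's event and trim the message
	 * length to the size of the caller's buffer
	 */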
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}