1
2
3
4
5#include "iavf_status.h"
6#include "iavf_type.h"
7#include "iavf_register.h"
8#include "iavf_adminq.h"
9#include "iavf_prototype.h"
10
11
12
13
14
15
16
17STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
18{
19
20 hw->aq.asq.tail = IAVF_VF_ATQT1;
21 hw->aq.asq.head = IAVF_VF_ATQH1;
22 hw->aq.asq.len = IAVF_VF_ATQLEN1;
23 hw->aq.asq.bal = IAVF_VF_ATQBAL1;
24 hw->aq.asq.bah = IAVF_VF_ATQBAH1;
25 hw->aq.arq.tail = IAVF_VF_ARQT1;
26 hw->aq.arq.head = IAVF_VF_ARQH1;
27 hw->aq.arq.len = IAVF_VF_ARQLEN1;
28 hw->aq.arq.bal = IAVF_VF_ARQBAL1;
29 hw->aq.arq.bah = IAVF_VF_ARQBAH1;
30}
31
32
33
34
35
36enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
37{
38 enum iavf_status ret_code;
39
40 ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
41 iavf_mem_atq_ring,
42 (hw->aq.num_asq_entries *
43 sizeof(struct iavf_aq_desc)),
44 IAVF_ADMINQ_DESC_ALIGNMENT);
45 if (ret_code)
46 return ret_code;
47
48 ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
49 (hw->aq.num_asq_entries *
50 sizeof(struct iavf_asq_cmd_details)));
51 if (ret_code) {
52 iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
53 return ret_code;
54 }
55
56 return ret_code;
57}
58
59
60
61
62
63enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
64{
65 enum iavf_status ret_code;
66
67 ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
68 iavf_mem_arq_ring,
69 (hw->aq.num_arq_entries *
70 sizeof(struct iavf_aq_desc)),
71 IAVF_ADMINQ_DESC_ALIGNMENT);
72
73 return ret_code;
74}
75
76
77
78
79
80
81
82
/**
 * iavf_free_adminq_asq - Free Admin Queue send ring
 * @hw: pointer to the hardware structure
 *
 * Frees the command-details array and the descriptor ring.  Assumes
 * any per-entry DMA buffers have already been released (see
 * iavf_free_asq_bufs for the full teardown).
 */
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
    iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
    iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
88
89
90
91
92
93
94
95
/**
 * iavf_free_adminq_arq - Free Admin Queue receive ring
 * @hw: pointer to the hardware structure
 *
 * Frees the receive descriptor ring only; posted receive buffers are
 * released separately (see iavf_free_arq_bufs).
 */
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
    iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
100
101
102
103
104
/**
 * iavf_alloc_arq_bufs - allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 *
 * Allocates one DMA buffer per ARQ descriptor and programs each
 * descriptor to point at its buffer, so the hardware has somewhere to
 * deposit events as soon as the queue is enabled.  On failure, every
 * buffer allocated so far is unwound before returning.
 */
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
    enum iavf_status ret_code;
    struct iavf_aq_desc *desc;
    struct iavf_dma_mem *bi;
    int i;

    /* Allocate the buffer-tracking table first, then the mapped
     * buffers themselves.  The table does not need DMA alignment.
     */
    ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
        (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
    if (ret_code)
        goto alloc_arq_bufs;
    hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

    /* allocate the mapped buffers and wire each descriptor to one */
    for (i = 0; i < hw->aq.num_arq_entries; i++) {
        bi = &hw->aq.arq.r.arq_bi[i];
        ret_code = iavf_allocate_dma_mem(hw, bi,
                                         iavf_mem_arq_buf,
                                         hw->aq.arq_buf_size,
                                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
            goto unwind_alloc_arq_bufs;

        /* now configure the descriptor for use */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
        /* LB flag marks "large buffer" descriptors when the buffer
         * size exceeds IAVF_AQ_LARGE_BUF
         */
        if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
            desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
        desc->opcode = 0;
        /* buffer size is carried per descriptor; there is no
         * separate register for it
         */
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->retval = 0;
        desc->cookie_high = 0;
        desc->cookie_low = 0;
        desc->params.external.addr_high =
            CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
        desc->params.external.addr_low =
            CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
        desc->params.external.param0 = 0;
        desc->params.external.param1 = 0;
    }

alloc_arq_bufs:
    return ret_code;

unwind_alloc_arq_bufs:
    /* i is the entry that failed; don't try to free that one */
    i--;
    for (; i >= 0; i--)
        iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
    iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

    return ret_code;
}
167
168
169
170
171
172STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
173{
174 enum iavf_status ret_code;
175 struct iavf_dma_mem *bi;
176 int i;
177
178
179 ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
180 (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
181 if (ret_code)
182 goto alloc_asq_bufs;
183 hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
184
185
186 for (i = 0; i < hw->aq.num_asq_entries; i++) {
187 bi = &hw->aq.asq.r.asq_bi[i];
188 ret_code = iavf_allocate_dma_mem(hw, bi,
189 iavf_mem_asq_buf,
190 hw->aq.asq_buf_size,
191 IAVF_ADMINQ_DESC_ALIGNMENT);
192 if (ret_code)
193 goto unwind_alloc_asq_bufs;
194 }
195alloc_asq_bufs:
196 return ret_code;
197
198unwind_alloc_asq_bufs:
199
200 i--;
201 for (; i >= 0; i--)
202 iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
203 iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
204
205 return ret_code;
206}
207
208
209
210
211
212STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
213{
214 int i;
215
216
217 for (i = 0; i < hw->aq.num_arq_entries; i++)
218 iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
219
220
221 iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
222
223
224 iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
225}
226
227
228
229
230
/**
 * iavf_free_asq_bufs - free send queue buffer info elements
 * @hw: pointer to the hardware structure
 *
 * Releases, in order: any per-entry DMA buffers that were actually
 * allocated, the command-details array, the descriptor ring, and the
 * buffer-tracking table.
 */
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
    int i;

    /* only free entries whose physical address is set — entries can
     * be unallocated if setup failed partway through
     */
    for (i = 0; i < hw->aq.num_asq_entries; i++)
        if (hw->aq.asq.r.asq_bi[i].pa)
            iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

    /* free the command details list */
    iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

    /* free the descriptor ring memory */
    iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

    /* free the buffer-tracking table */
    iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
249
250
251
252
253
254
255
256STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
257{
258 enum iavf_status ret_code = IAVF_SUCCESS;
259 u32 reg = 0;
260
261
262 wr32(hw, hw->aq.asq.head, 0);
263 wr32(hw, hw->aq.asq.tail, 0);
264
265
266 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
267 IAVF_VF_ATQLEN1_ATQENABLE_MASK));
268 wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
269 wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));
270
271
272 reg = rd32(hw, hw->aq.asq.bal);
273 if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
274 ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
275
276 return ret_code;
277}
278
279
280
281
282
283
284
/**
 * iavf_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Programs the receive queue's head/tail, length (with enable bit)
 * and base-address registers, bumps the tail to post the
 * pre-allocated buffers, then reads the low base-address register
 * back to confirm the write took effect.
 */
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
    enum iavf_status ret_code = IAVF_SUCCESS;
    u32 reg = 0;

    /* clear head and tail */
    wr32(hw, hw->aq.arq.head, 0);
    wr32(hw, hw->aq.arq.tail, 0);

    /* ring length with the enable bit, then the 64-bit base address */
    wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                              IAVF_VF_ARQLEN1_ARQENABLE_MASK));
    wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
    wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

    /* update tail to hand the pre-posted buffers to the hardware;
     * must come after length/base are programmed above
     */
    wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

    /* check one register to verify that config was applied */
    reg = rd32(hw, hw->aq.arq.bal);
    if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
        ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

    return ret_code;
}
310
311
312
313
314
315
316
317
318
319
320
321
322
323
/**
 * iavf_init_asq - main initialization routine for the ASQ
 * @hw: pointer to the hardware structure
 *
 * Before calling, the driver must have set hw->aq.num_asq_entries and
 * hw->aq.asq_buf_size (both are validated here).  Allocates the ring,
 * then the per-entry buffers, then programs the hardware registers;
 * each failure path unwinds exactly what was set up before it.
 */
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
    enum iavf_status ret_code = IAVF_SUCCESS;

    if (hw->aq.asq.count > 0) {
        /* queue already initialized */
        ret_code = IAVF_ERR_NOT_READY;
        goto init_adminq_exit;
    }

    /* verify input for valid configuration */
    if ((hw->aq.num_asq_entries == 0) ||
        (hw->aq.asq_buf_size == 0)) {
        ret_code = IAVF_ERR_CONFIG;
        goto init_adminq_exit;
    }

    hw->aq.asq.next_to_use = 0;
    hw->aq.asq.next_to_clean = 0;

    /* allocate the ring memory */
    ret_code = iavf_alloc_adminq_asq_ring(hw);
    if (ret_code != IAVF_SUCCESS)
        goto init_adminq_exit;

    /* allocate buffers in the rings */
    ret_code = iavf_alloc_asq_bufs(hw);
    if (ret_code != IAVF_SUCCESS)
        goto init_adminq_free_rings;

    /* initialize base registers */
    ret_code = iavf_config_asq_regs(hw);
    if (ret_code != IAVF_SUCCESS)
        goto init_config_regs;

    /* success! mark the queue as initialized */
    hw->aq.asq.count = hw->aq.num_asq_entries;
    goto init_adminq_exit;

init_adminq_free_rings:
    /* buffer allocation failed: only the ring exists */
    iavf_free_adminq_asq(hw);
    return ret_code;

init_config_regs:
    /* register config failed: frees buffers, ring, and tracking table */
    iavf_free_asq_bufs(hw);

init_adminq_exit:
    return ret_code;
}
373
374
375
376
377
378
379
380
381
382
383
384
385
386
/**
 * iavf_init_arq - main initialization routine for the ARQ
 * @hw: pointer to the hardware structure
 *
 * Before calling, the driver must have set hw->aq.num_arq_entries and
 * hw->aq.arq_buf_size (both are validated here).  Allocates the ring,
 * pre-posts the receive buffers, then programs the hardware registers.
 */
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
    enum iavf_status ret_code = IAVF_SUCCESS;

    if (hw->aq.arq.count > 0) {
        /* queue already initialized */
        ret_code = IAVF_ERR_NOT_READY;
        goto init_adminq_exit;
    }

    /* verify input for valid configuration */
    if ((hw->aq.num_arq_entries == 0) ||
        (hw->aq.arq_buf_size == 0)) {
        ret_code = IAVF_ERR_CONFIG;
        goto init_adminq_exit;
    }

    hw->aq.arq.next_to_use = 0;
    hw->aq.arq.next_to_clean = 0;

    /* allocate the ring memory */
    ret_code = iavf_alloc_adminq_arq_ring(hw);
    if (ret_code != IAVF_SUCCESS)
        goto init_adminq_exit;

    /* allocate buffers in the rings */
    ret_code = iavf_alloc_arq_bufs(hw);
    if (ret_code != IAVF_SUCCESS)
        goto init_adminq_free_rings;

    /* initialize base registers (also posts the buffers via tail) */
    ret_code = iavf_config_arq_regs(hw);
    if (ret_code != IAVF_SUCCESS)
        goto init_adminq_free_rings;

    /* success! mark the queue as initialized */
    hw->aq.arq.count = hw->aq.num_arq_entries;
    goto init_adminq_exit;

init_adminq_free_rings:
    iavf_free_adminq_arq(hw);

init_adminq_exit:
    return ret_code;
}
432
433
434
435
436
437
438
/**
 * iavf_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * Disables the send queue registers, marks the queue uninitialized,
 * and frees all of its memory.  Returns IAVF_ERR_NOT_READY if the
 * queue was never initialized.  Takes asq_spinlock for the duration.
 */
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
    enum iavf_status ret_code = IAVF_SUCCESS;

    iavf_acquire_spinlock(&hw->aq.asq_spinlock);

    if (hw->aq.asq.count == 0) {
        ret_code = IAVF_ERR_NOT_READY;
        goto shutdown_asq_out;
    }

    /* stop admin queue processing: zero head/tail, length (clearing
     * the enable bit) and both base-address registers
     */
    wr32(hw, hw->aq.asq.head, 0);
    wr32(hw, hw->aq.asq.tail, 0);
    wr32(hw, hw->aq.asq.len, 0);
    wr32(hw, hw->aq.asq.bal, 0);
    wr32(hw, hw->aq.asq.bah, 0);

    /* count == 0 marks the queue as uninitialized */
    hw->aq.asq.count = 0;

    /* free ring buffers and the ring itself */
    iavf_free_asq_bufs(hw);

shutdown_asq_out:
    iavf_release_spinlock(&hw->aq.asq_spinlock);
    return ret_code;
}
466
467
468
469
470
471
472
/**
 * iavf_shutdown_arq - shutdown the ARQ
 * @hw: pointer to the hardware structure
 *
 * Disables the receive queue registers, marks the queue
 * uninitialized, and frees all of its memory.  Returns
 * IAVF_ERR_NOT_READY if the queue was never initialized.
 * Takes arq_spinlock for the duration.
 */
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
    enum iavf_status ret_code = IAVF_SUCCESS;

    iavf_acquire_spinlock(&hw->aq.arq_spinlock);

    if (hw->aq.arq.count == 0) {
        ret_code = IAVF_ERR_NOT_READY;
        goto shutdown_arq_out;
    }

    /* stop admin queue processing: zero head/tail, length (clearing
     * the enable bit) and both base-address registers
     */
    wr32(hw, hw->aq.arq.head, 0);
    wr32(hw, hw->aq.arq.tail, 0);
    wr32(hw, hw->aq.arq.len, 0);
    wr32(hw, hw->aq.arq.bal, 0);
    wr32(hw, hw->aq.arq.bah, 0);

    /* count == 0 marks the queue as uninitialized */
    hw->aq.arq.count = 0;

    /* free ring buffers and the ring itself */
    iavf_free_arq_bufs(hw);

shutdown_arq_out:
    iavf_release_spinlock(&hw->aq.arq_spinlock);
    return ret_code;
}
500
501
502
503
504
505
506
507
508
509
510
511
/**
 * iavf_init_adminq - main initialization routine for the Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following
 * fields in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.asq_buf_size
 *     - hw->aq.arq_buf_size
 */
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
    enum iavf_status ret_code;

    /* verify input for valid configuration */
    if ((hw->aq.num_arq_entries == 0) ||
        (hw->aq.num_asq_entries == 0) ||
        (hw->aq.arq_buf_size == 0) ||
        (hw->aq.asq_buf_size == 0)) {
        ret_code = IAVF_ERR_CONFIG;
        goto init_adminq_exit;
    }
    iavf_init_spinlock(&hw->aq.asq_spinlock);
    iavf_init_spinlock(&hw->aq.arq_spinlock);

    /* record register offsets before any queue setup */
    iavf_adminq_init_regs(hw);

    /* setup ASQ command write-back timeout */
    hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

    /* allocate the ASQ */
    ret_code = iavf_init_asq(hw);
    if (ret_code != IAVF_SUCCESS)
        goto init_adminq_destroy_spinlocks;

    /* allocate the ARQ */
    ret_code = iavf_init_arq(hw);
    if (ret_code != IAVF_SUCCESS)
        goto init_adminq_free_asq;

    /* success! */
    goto init_adminq_exit;

init_adminq_free_asq:
    iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
    iavf_destroy_spinlock(&hw->aq.asq_spinlock);
    iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
    return ret_code;
}
555
556
557
558
559
560enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
561{
562 enum iavf_status ret_code = IAVF_SUCCESS;
563
564 if (iavf_check_asq_alive(hw))
565 iavf_aq_queue_shutdown(hw, true);
566
567 iavf_shutdown_asq(hw);
568 iavf_shutdown_arq(hw);
569 iavf_destroy_spinlock(&hw->aq.asq_spinlock);
570 iavf_destroy_spinlock(&hw->aq.arq_spinlock);
571
572 return ret_code;
573}
574
575
576
577
578
579
580
/**
 * iavf_clean_asq - clean the send side of the Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Walks the ASQ from next_to_clean up to the hardware head pointer,
 * invoking any registered per-command callback with a copy of the
 * descriptor, then zeroing the descriptor and its details entry.
 *
 * Returns the number of free descriptors remaining in the ring.
 */
u16 iavf_clean_asq(struct iavf_hw *hw)
{
    struct iavf_adminq_ring *asq = &(hw->aq.asq);
    struct iavf_asq_cmd_details *details;
    u16 ntc = asq->next_to_clean;
    struct iavf_aq_desc desc_cb;
    struct iavf_aq_desc *desc;

    desc = IAVF_ADMINQ_DESC(*asq, ntc);
    details = IAVF_ADMINQ_DETAILS(*asq, ntc);
    /* hardware advances head as it consumes descriptors; everything
     * between ntc and head is complete and can be cleaned
     */
    while (rd32(hw, hw->aq.asq.head) != ntc) {
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

        if (details->callback) {
            IAVF_ADMINQ_CALLBACK cb_func =
                (IAVF_ADMINQ_CALLBACK)details->callback;
            /* give the callback a stable copy, since the ring
             * descriptor is zeroed right below
             */
            iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
                        IAVF_DMA_TO_DMA);
            cb_func(hw, &desc_cb);
        }
        iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
        iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
        ntc++;
        /* wrap around the ring */
        if (ntc == asq->count)
            ntc = 0;
        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = IAVF_ADMINQ_DETAILS(*asq, ntc);
    }

    asq->next_to_clean = ntc;

    return IAVF_DESC_UNUSED(asq);
}
615
616
617
618
619
620
621
622
623bool iavf_asq_done(struct iavf_hw *hw)
624{
625
626
627
628 return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
629
630}
631
632
633
634
635
636
637
638
639
640
641
642
/**
 * iavf_asq_send_command - send a command to the Admin Send Queue
 * @hw: pointer to the hardware structure
 * @desc: prefilled descriptor describing the command (non-DMA memory);
 *        updated in place with the hardware's writeback on completion
 * @buff: buffer for indirect commands, or NULL for direct commands
 * @buff_size: size of @buff in bytes; must not exceed hw->aq.asq_buf_size
 * @cmd_details: optional per-command details (cookie, flags, callback,
 *               async/postpone behavior); may be NULL
 *
 * Validates queue state, copies the descriptor (and buffer, if any)
 * onto the ring, bumps the tail, and — unless the command is marked
 * async or postponed — polls for completion up to asq_cmd_timeout.
 * Holds asq_spinlock for the entire operation.
 */
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
                                       struct iavf_aq_desc *desc,
                                       void *buff,
                                       u16 buff_size,
                                       struct iavf_asq_cmd_details *cmd_details)
{
    enum iavf_status status = IAVF_SUCCESS;
    struct iavf_dma_mem *dma_buff = NULL;
    struct iavf_asq_cmd_details *details;
    struct iavf_aq_desc *desc_on_ring;
    bool cmd_completed = false;
    u16 retval = 0;
    u32 val = 0;

    iavf_acquire_spinlock(&hw->aq.asq_spinlock);

    hw->aq.asq_last_status = IAVF_AQ_RC_OK;

    if (hw->aq.asq.count == 0) {
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: Admin queue not initialized.\n");
        status = IAVF_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    /* a head value at or past the ring size indicates a confused or
     * failing device
     */
    val = rd32(hw, hw->aq.asq.head);
    if (val >= hw->aq.num_asq_entries) {
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: head overrun at %d\n", val);
        status = IAVF_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
    if (cmd_details) {
        iavf_memcpy(details,
                    cmd_details,
                    sizeof(struct iavf_asq_cmd_details),
                    IAVF_NONDMA_TO_NONDMA);

        /* if the caller supplied a cookie, stamp it into the
         * descriptor's cookie fields
         */
        if (details->cookie) {
            desc->cookie_high =
                CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
            desc->cookie_low =
                CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
        }
    } else {
        iavf_memset(details, 0,
                    sizeof(struct iavf_asq_cmd_details),
                    IAVF_NONDMA_MEM);
    }

    /* clear requested flags and then set additional flags if defined */
    desc->flags &= ~CPU_TO_LE16(details->flags_dis);
    desc->flags |= CPU_TO_LE16(details->flags_ena);

    if (buff_size > hw->aq.asq_buf_size) {
        iavf_debug(hw,
                   IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: Invalid buffer size: %d.\n",
                   buff_size);
        status = IAVF_ERR_INVALID_SIZE;
        goto asq_send_command_error;
    }

    /* postpone only makes sense for async commands */
    if (details->postpone && !details->async) {
        iavf_debug(hw,
                   IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: Async flag not set along with postpone flag");
        status = IAVF_ERR_PARAM;
        goto asq_send_command_error;
    }

    /* clean completed descriptors to make room; a return of 0 free
     * descriptors means the queue is full
     */
    if (iavf_clean_asq(hw) == 0) {
        iavf_debug(hw,
                   IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: Error queue is full.\n");
        status = IAVF_ERR_ADMIN_QUEUE_FULL;
        goto asq_send_command_error;
    }

    /* initialize the temp desc pointer with the right desc */
    desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

    /* if the desc is available copy the temp desc to the right place */
    iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
                IAVF_NONDMA_TO_DMA);

    /* if buff is not NULL assume indirect command */
    if (buff != NULL) {
        dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
        /* copy the user buff into the respective DMA buff */
        iavf_memcpy(dma_buff->va, buff, buff_size,
                    IAVF_NONDMA_TO_DMA);
        desc_on_ring->datalen = CPU_TO_LE16(buff_size);

        /* update the address values in the desc with the pa value
         * for respective buffer
         */
        desc_on_ring->params.external.addr_high =
            CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
        desc_on_ring->params.external.addr_low =
            CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
    }

    /* bump the tail to hand the descriptor to hardware (unless the
     * caller asked to postpone)
     */
    iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
    iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                  buff, buff_size);
    (hw->aq.asq.next_to_use)++;
    if (hw->aq.asq.next_to_use == hw->aq.asq.count)
        hw->aq.asq.next_to_use = 0;
    if (!details->postpone)
        wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

    /* if command details are not defined or async flag is not set,
     * we need to wait for desc write back
     */
    if (!details->async && !details->postpone) {
        u32 total_delay = 0;

        do {
            /* AQ designers suggest use of head for better
             * timing reliability than DD bit
             */
            if (iavf_asq_done(hw))
                break;
            iavf_usec_delay(50);
            total_delay += 50;
        } while (total_delay < hw->aq.asq_cmd_timeout);
    }

    /* if ready, copy the desc back to temp */
    if (iavf_asq_done(hw)) {
        iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
                    IAVF_DMA_TO_NONDMA);
        if (buff != NULL)
            iavf_memcpy(buff, dma_buff->va, buff_size,
                        IAVF_DMA_TO_NONDMA);
        retval = LE16_TO_CPU(desc->retval);
        if (retval != 0) {
            iavf_debug(hw,
                       IAVF_DEBUG_AQ_MESSAGE,
                       "AQTX: Command completed with error 0x%X.\n",
                       retval);

            /* strip off FW internal code */
            retval &= 0xff;
        }
        cmd_completed = true;
        if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
            status = IAVF_SUCCESS;
        else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
            status = IAVF_ERR_NOT_READY;
        else
            status = IAVF_ERR_ADMIN_QUEUE_ERROR;
        hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
    }

    iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
               "AQTX: desc and buffer writeback:\n");
    iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

    /* save writeback aq if requested */
    if (details->wb_desc)
        iavf_memcpy(details->wb_desc, desc_on_ring,
                    sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

    /* update the error if time out occurred */
    if ((!cmd_completed) &&
        (!details->async && !details->postpone)) {
        if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
            iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                       "AQTX: AQ Critical error.\n");
            status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
        } else {
            iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                       "AQTX: Writeback timeout.\n");
            status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
        }
    }

asq_send_command_error:
    iavf_release_spinlock(&hw->aq.asq_spinlock);
    return status;
}
840
841
842
843
844
845
846
847
848void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
849 u16 opcode)
850{
851
852 iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
853 IAVF_NONDMA_MEM);
854 desc->opcode = CPU_TO_LE16(opcode);
855 desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
856}
857
858
859
860
861
862
863
864
865
866
867
/**
 * iavf_clean_arq_element - retrieve one event from the receive queue
 * @hw: pointer to the hardware structure
 * @e: event info structure; receives a copy of the descriptor and, if
 *     e->msg_buf is non-NULL, up to e->buf_len bytes of message data
 * @pending: if non-NULL, set to the number of produced-but-unprocessed
 *           descriptors remaining after this one
 *
 * Copies the next completed event out of the ARQ, then recycles the
 * descriptor's buffer back to the hardware by re-initializing the
 * descriptor and bumping the tail register.  Returns
 * IAVF_ERR_ADMIN_QUEUE_NO_WORK when there is nothing to process.
 */
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
                                        struct iavf_arq_event_info *e,
                                        u16 *pending)
{
    enum iavf_status ret_code = IAVF_SUCCESS;
    u16 ntc = hw->aq.arq.next_to_clean;
    struct iavf_aq_desc *desc;
    struct iavf_dma_mem *bi;
    u16 desc_idx;
    u16 datalen;
    u16 flags;
    u16 ntu;

    /* pre-clean the event info */
    iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

    /* take the lock before we start messing with the ring */
    iavf_acquire_spinlock(&hw->aq.arq_spinlock);

    if (hw->aq.arq.count == 0) {
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQRX: Admin queue not initialized.\n");
        ret_code = IAVF_ERR_QUEUE_EMPTY;
        goto clean_arq_element_err;
    }

    /* set next_to_use to head */
    ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
    if (ntu == ntc) {
        /* nothing to do - shouldn't need to update ring's values */
        ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
        goto clean_arq_element_out;
    }

    /* now clean the next descriptor */
    desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
    desc_idx = ntc;

    hw->aq.arq_last_status =
        (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
    flags = LE16_TO_CPU(desc->flags);
    if (flags & IAVF_AQ_FLAG_ERR) {
        ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
        iavf_debug(hw,
                   IAVF_DEBUG_AQ_MESSAGE,
                   "AQRX: Event received with error 0x%X.\n",
                   hw->aq.arq_last_status);
    }

    /* copy descriptor and (truncated) message out to the caller */
    iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
                IAVF_DMA_TO_NONDMA);
    datalen = LE16_TO_CPU(desc->datalen);
    e->msg_len = min(datalen, e->buf_len);
    if (e->msg_buf != NULL && (e->msg_len != 0))
        iavf_memcpy(e->msg_buf,
                    hw->aq.arq.r.arq_bi[desc_idx].va,
                    e->msg_len, IAVF_DMA_TO_NONDMA);

    iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
    iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                  hw->aq.arq_buf_size);

    /* Restore the original datalen and buffer address in the desc,
     * FW updates datalen to indicate the event message size
     */
    bi = &hw->aq.arq.r.arq_bi[ntc];
    iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

    desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
    if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
        desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
    desc->datalen = CPU_TO_LE16((u16)bi->size);
    desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
    desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

    /* set tail = the last cleaned desc index; must follow the
     * descriptor re-initialization above
     */
    wr32(hw, hw->aq.arq.tail, ntc);
    /* ntc is updated to tail + 1 */
    ntc++;
    if (ntc == hw->aq.num_arq_entries)
        ntc = 0;
    hw->aq.arq.next_to_clean = ntc;
    hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
    /* Set pending if needed, unlock and return */
    if (pending != NULL)
        *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
    iavf_release_spinlock(&hw->aq.arq_spinlock);

    return ret_code;
}
962
963