// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define cmq_ring_to_dev(ring)	(&(ring)->dev->pdev->dev)

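/* Number of free slots in the ring. One slot is deliberately kept
 * unused so that a full ring (used == desc_num - 1) can be
 * distinguished from an empty one (next_to_use == next_to_clean).
 */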
static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

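/* A hardware-reported head is valid only if it lies within the span of
 * descriptors currently owned by hardware, i.e. between next_to_clean
 * and next_to_use, accounting for ring wrap-around.
 */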
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

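/* Allocate a DMA-coherent descriptor array for a command queue ring;
 * the hardware reads and writes these descriptors directly.
 */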
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

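/* Release the descriptor array; safe to call on a ring whose
 * descriptors were never allocated or have already been freed.
 */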
static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

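/* Initialise one of the two command queue rings: the CSQ (command send
 * queue, driver -> firmware) or the CRQ (command receive queue,
 * firmware -> driver).
 */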
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}

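/* Re-arm an already initialised descriptor for another send: restore
 * the default flags and set or clear only the read direction bit.
 */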
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

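/* Zero a descriptor and fill in the opcode and default flags; every
 * command sent through the CSQ starts from this state.
 */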
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}

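/* Program the ring's DMA base address, depth and head/tail pointers
 * into the hardware registers. For the CSQ the depth register also
 * carries the software-reset-ready bit, which must be preserved.
 */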
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}

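/* Reclaim CSQ descriptors that the firmware has consumed, advancing
 * next_to_clean up to the head pointer reported by hardware. Returns
 * the number of descriptors cleaned, or -EIO on an invalid head.
 */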
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

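/* A submission is complete once the hardware head has caught up with
 * the software next_to_use pointer.
 */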
static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	/* These commands have several descriptors, and use the first one
	 * to save the opcode and return value.
	 */
	static const u16 spec_opcode[] = {
		HCLGE_OPC_STATS_64_BIT,
		HCLGE_OPC_STATS_32_BIT,
		HCLGE_OPC_STATS_MAC,
		HCLGE_OPC_STATS_MAC_ALL,
		HCLGE_OPC_QUERY_32_BIT_REG,
		HCLGE_OPC_QUERY_64_BIT_REG,
		HCLGE_QUERY_CLEAR_MPF_RAS_INT,
		HCLGE_QUERY_CLEAR_PF_RAS_INT,
		HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
		HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
		HCLGE_QUERY_ALL_ERR_INFO
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

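/* Mapping from IMP firmware error codes to standard kernel errnos. */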
struct errcode {
	u32 imp_errcode;
	int common_errno;
};

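/* Copy the caller's descriptors into the CSQ ring, advancing and
 * wrapping next_to_use as needed.
 */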
static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
				int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

static int hclge_cmd_convert_err_code(u16 desc_ret)
{
	struct errcode hclge_cmd_errcode[] = {
		{HCLGE_CMD_EXEC_SUCCESS, 0},
		{HCLGE_CMD_NO_AUTH, -EPERM},
		{HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HCLGE_CMD_QUEUE_FULL, -EXFULL},
		{HCLGE_CMD_NEXT_ERR, -ENOSR},
		{HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
		{HCLGE_CMD_PARA_ERR, -EINVAL},
		{HCLGE_CMD_RESULT_ERR, -ERANGE},
		{HCLGE_CMD_TIMEOUT, -ETIME},
		{HCLGE_CMD_HILINK_ERR, -ENOLINK},
		{HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HCLGE_CMD_INVALID, -EBADR},
	};
	u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_cmd_errcode[i].common_errno;

	return -EIO;
}

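/* Copy the completed descriptors back to the caller and translate the
 * firmware return value. For "special" multi-descriptor commands the
 * return value lives in the first descriptor, otherwise in the last.
 */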
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_cmd_convert_err_code(desc_ret);
}

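/* Poll for completion of a synchronous command (up to tx_timeout
 * microseconds), check its return value, then reclaim the ring
 * descriptors it occupied.
 */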
static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	bool is_completed = false;
	u32 timeout = 0;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are to be sent, use the first one to
	 * check.
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				is_completed = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It sends the
 * queue, cleans the queue, etc.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may
		 * differ, so update the SW HEAD pointer csq->next_to_clean.
		 */
		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of the descriptors in the ring for this
	 * time, which will be used for hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	ret = hclge_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}

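/* Default capabilities implied by the device version alone, before the
 * firmware-reported capability bits are parsed.
 */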
static void hclge_set_default_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	}
}

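/* Bit map between IMP firmware capability bits and local driver
 * capability bits. Note that one firmware bit may map to more than one
 * driver bit (e.g. port VLAN bypass also implies VLAN filter modify).
 */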
static const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
	{HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
	{HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
	{HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
	{HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
	{HCLGE_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
	{HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
	{HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
	{HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
};

static void hclge_parse_capability(struct hclge_dev *hdev,
				   struct hclge_query_version_cmd *cmd)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	u32 caps, i;

	caps = __le32_to_cpu(cmd->caps[0]);
	for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++)
		if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit))
			set_bit(hclge_cmd_caps_bit_map0[i].local_bit,
				ae_dev->caps);
}

static __le32 hclge_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_query_version_cmd *)desc.data;
	resp->api_caps = hclge_build_api_caps();

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	hdev->fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
				 HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= hdev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclge_set_default_capability(hdev);

	hclge_parse_capability(hdev, resp);

	return ret;
}

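/* Allocate the CSQ and CRQ rings and initialise their locks, depths
 * and timeout; expected to run once, before the command queue is
 * brought into service by hclge_cmd_init().
 */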
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Setup the lock for the command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup the queue entries used by the command queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

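/* Tell the firmware which optional compatibility features the driver
 * wants enabled; passing en == false sends an all-zero mask, clearing
 * them all.
 */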
static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
{
	struct hclge_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

	if (en) {
		req = (struct hclge_firmware_compat_cmd *)desc.data;

		hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
		if (hnae3_dev_phy_imp_supported(hdev))
			hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);

		req->compat = cpu_to_le32(compat);
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

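/* Bring the command queue into service: reset the ring pointers,
 * program the hardware registers, then query the firmware version and
 * device capabilities.
 */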
int hclge_cmd_init(struct hclge_dev *hdev)
{
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is a new reset pending, because the higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (hclge_is_reset_pending(hdev)) {
		dev_err(&hdev->pdev->dev,
			"failed to init cmd since reset %#lx pending\n",
			hdev->reset_pending);
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* Get version and device capabilities */
	ret = hclge_cmd_query_version_and_capability(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	/* Ask the firmware to enable some features; the driver can work
	 * without them.
	 */
	ret = hclge_firmware_compat_config(hdev, true);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);

	return 0;

err_cmd_init:
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}

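/* Zero all command queue registers so the hardware stops accessing the
 * (about to be freed) descriptor rings.
 */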
static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
{
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}

void hclge_cmd_uninit(struct hclge_dev *hdev)
{
	hclge_firmware_compat_config(hdev, false);

	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Wait to ensure that the firmware completes any commands that
	 * may be left over.
	 */
	msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	hclge_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	hclge_free_cmd_desc(&hdev->hw.cmq.crq);
}