#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define cmq_ring_to_dev(ring)	(&(ring)->dev->pdev->dev)

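/* Return the number of free descriptors in the ring. One slot is kept
 * unused so that next_to_use == next_to_clean always means "empty".
 */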
static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used;

	used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

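/* Check that the head pointer reported by hardware lies between the
 * software next_to_clean and next_to_use indices, taking wrap-around
 * of the circular ring into account.
 */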
static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
					   int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

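/* Reclaim CSQ descriptors that the firmware has consumed, advancing
 * next_to_clean up to the hardware head pointer. Returns the number of
 * descriptors cleaned, or -EIO if the reported head is out of range.
 */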
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
	struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
	rmb(); /* make sure the head is read before touching any data */

	if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

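/* The CSQ is fully processed when the hardware head pointer has caught
 * up with the software next_to_use index.
 */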
static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

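/* "Special" opcodes are multi-descriptor commands for which the return
 * code is taken from the first descriptor only (see hclgevf_cmd_send).
 */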
static bool hclgevf_is_special_opcode(u16 opcode)
{
	static const u16 spec_opcode[] = {0x30, 0x31, 0x32};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

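/* Program the base address, depth and head/tail pointers of a command
 * queue ring (CSQ or CRQ) into the hardware registers.
 */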
static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
{
	struct hclgevf_dev *hdev = ring->dev;
	struct hclgevf_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->flag == HCLGEVF_TYPE_CSQ) {
		reg_val = lower_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
		reg_val = upper_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

		reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGEVF_NIC_SW_RST_RDY;
		reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	} else {
		reg_val = lower_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
		reg_val = upper_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
{
	hclgevf_cmd_config_regs(&hw->cmq.csq);
	hclgevf_cmd_config_regs(&hw->cmq.crq);
}

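/* Allocate the DMA-coherent descriptor array backing a command queue ring. */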
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
	struct hclgevf_hw *hw = &hdev->hw;
	struct hclgevf_cmq_ring *ring =
		(ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->dev = hdev;
	ring->flag = ring_type;

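	/* allocate the descriptor array for the CSQ/CRQ ring */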
	ret = hclgevf_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");

	return ret;
}

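/* Fill in the common fields of a command descriptor: zero it, set the
 * opcode and the default flags (NO_INTR | IN), and set or clear the WR
 * flag depending on whether this is a read request.
 */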
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
				  enum hclgevf_opcode_type opcode, bool is_read)
{
	memset(desc, 0, sizeof(struct hclgevf_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
				 HCLGEVF_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}

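/* Map a firmware return code from the descriptor into a standard errno. */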
static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
	switch (desc_ret) {
	case HCLGEVF_CMD_EXEC_SUCCESS:
		return 0;
	case HCLGEVF_CMD_NO_AUTH:
		return -EPERM;
	case HCLGEVF_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HCLGEVF_CMD_QUEUE_FULL:
		return -EXFULL;
	case HCLGEVF_CMD_NEXT_ERR:
		return -ENOSR;
	case HCLGEVF_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HCLGEVF_CMD_PARA_ERR:
		return -EINVAL;
	case HCLGEVF_CMD_RESULT_ERR:
		return -ERANGE;
	case HCLGEVF_CMD_TIMEOUT:
		return -ETIME;
	case HCLGEVF_CMD_HILINK_ERR:
		return -ENOLINK;
	case HCLGEVF_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HCLGEVF_CMD_INVALID:
		return -EBADR;
	default:
		return -EIO;
	}
}

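/* hclgevf_cmd_send - send commands on the CSQ and wait for completion
 * @hw: pointer to the hw struct
 * @desc: array of prefilled descriptors describing the command(s)
 * @num: number of descriptors in @desc
 *
 * Copies the descriptors into the command send queue, rings the tail
 * doorbell, busy-waits for the firmware to process them (for synchronous
 * commands), copies the results back into @desc and cleans the queue.
 */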
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	struct hclgevf_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int status = 0;
	u16 retval;
	u16 opcode;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
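		/* If the CSQ looks full, the driver's next_to_clean may be
		 * stale; refresh it from the hardware head register before
		 * returning -EBUSY.
		 */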
		csq->next_to_clean = hclgevf_read_dev(hw,
						      HCLGEVF_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

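	/* Record where this batch starts in the ring; the firmware writes
	 * its results back into these same slots.
	 */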
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

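	/* Ring the doorbell: publish the new tail index to the hardware. */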
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
			  hw->cmq.csq.next_to_use);

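	/* For a synchronous command, poll until the firmware has consumed
	 * all submitted descriptors or the configured timeout expires.
	 */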
	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclgevf_cmd_csq_done(hw))
				break;
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (hclgevf_cmd_csq_done(hw)) {
		complete = true;
		handle = 0;

		while (handle < num) {
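			/* copy the firmware's write-back result for this
			 * descriptor back into the caller's buffer
			 */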
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			if (likely(!hclgevf_is_special_opcode(opcode)))
				retval = le16_to_cpu(desc[handle].retval);
			else
				retval = le16_to_cpu(desc[0].retval);

			status = hclgevf_cmd_convert_err_code(retval);
			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		status = -EBADE;

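	/* reclaim the descriptors just submitted from the CSQ */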
	handle = hclgevf_cmd_csq_clean(hw);
	if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return status;
}

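/* Query the version of the firmware running on the IMP and return it
 * via @version on success.
 */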
static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
					      u32 *version)
{
	struct hclgevf_query_version_cmd *resp;
	struct hclgevf_desc desc;
	int status;

	resp = (struct hclgevf_query_version_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
	status = hclgevf_cmd_send(hw, &desc, 1);
	if (!status)
		*version = le32_to_cpu(resp->firmware);

	return status;
}

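/* Allocate the CSQ and CRQ descriptor rings and initialize their locks,
 * depths and send timeout.
 */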
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
	int ret;

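	/* set up the locks protecting the CSQ and CRQ */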
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
	hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;

	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

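/* Bring up the command queue: reset the ring indices, program the ring
 * registers, re-enable command submission and query the firmware version.
 */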
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
	u32 version;
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

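	/* reset the mailbox arq (async receive queue) indices and both
	 * command-queue rings to the empty state
	 */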
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclgevf_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

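	/* Check whether a new reset is pending: a higher-level reset may
	 * have been triggered while a lower-level reset was being handled,
	 * in which case the command queue must stay disabled.
	 */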
	if (hclgevf_is_reset_pending(hdev)) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

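	/* get the firmware version running on the device */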
	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to query firmware version\n", ret);
		goto err_cmd_init;
	}
	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	return 0;

err_cmd_init:
	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}

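/* Clear all command queue registers so the hardware stops using the rings. */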
static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
{
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
}

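/* Tear down the command queue: disable further commands, clear the ring
 * registers under both locks, then free the CSQ and CRQ descriptors.
 */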
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	hclgevf_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}