// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

4#include <linux/dma-mapping.h>
5#include <linux/slab.h>
6#include <linux/pci.h>
7#include <linux/device.h>
8#include <linux/err.h>
9#include <linux/dma-direction.h>
10#include "hclge_cmd.h"
11#include "hnae3.h"
12#include "hclge_main.h"
13
/* True when @ring is the command-send queue (CSQ), false for the CRQ */
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)

/* struct device * backing a cmq ring, for DMA mapping and logging */
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
17
18static int hclge_ring_space(struct hclge_cmq_ring *ring)
19{
20 int ntu = ring->next_to_use;
21 int ntc = ring->next_to_clean;
22 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
23
24 return ring->desc_num - used - 1;
25}
26
27static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
28{
29 int ntu = ring->next_to_use;
30 int ntc = ring->next_to_clean;
31
32 if (ntu > ntc)
33 return head >= ntc && head <= ntu;
34
35 return head >= ntc || head <= ntu;
36}
37
38static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
39{
40 int size = ring->desc_num * sizeof(struct hclge_desc);
41
42 ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
43 &ring->desc_dma_addr, GFP_KERNEL);
44 if (!ring->desc)
45 return -ENOMEM;
46
47 return 0;
48}
49
50static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
51{
52 int size = ring->desc_num * sizeof(struct hclge_desc);
53
54 if (ring->desc) {
55 dma_free_coherent(cmq_ring_to_dev(ring), size,
56 ring->desc, ring->desc_dma_addr);
57 ring->desc = NULL;
58 }
59}
60
61static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
62{
63 struct hclge_hw *hw = &hdev->hw;
64 struct hclge_cmq_ring *ring =
65 (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
66 int ret;
67
68 ring->ring_type = ring_type;
69 ring->dev = hdev;
70
71 ret = hclge_alloc_cmd_desc(ring);
72 if (ret) {
73 dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
74 (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
75 return ret;
76 }
77
78 return 0;
79}
80
81void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
82{
83 desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
84 if (is_read)
85 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
86 else
87 desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
88}
89
90void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
91 enum hclge_opcode_type opcode, bool is_read)
92{
93 memset((void *)desc, 0, sizeof(struct hclge_desc));
94 desc->opcode = cpu_to_le16(opcode);
95 desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
96
97 if (is_read)
98 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
99}
100
/* Program one ring's DMA base address, depth and head/tail pointers into
 * the NIC command-queue registers, enabling the queue in hardware.
 */
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		/* depth field is encoded as desc_num >> DESC_NUM_S,
		 * OR-ed with the queue-enable bit
		 */
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}
129
/* Program both command-queue rings (CSQ and CRQ) into hardware. */
static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}
135
/* Reclaim CSQ descriptors that hardware has consumed.
 *
 * Reads the hardware head pointer, validates it against the software
 * ring state, and advances next_to_clean up to it. Returns the number of
 * descriptors cleaned, or -EIO if the reported head is implausible (in
 * which case further commands are disabled pending an IMP reset).
 */
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	/* order the head read before the ring-state reads below */
	rmb();

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	/* modular distance from next_to_clean to head = descriptors done */
	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}
161
162static int hclge_cmd_csq_done(struct hclge_hw *hw)
163{
164 u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
165 return head == hw->cmq.csq.next_to_use;
166}
167
168static bool hclge_is_special_opcode(u16 opcode)
169{
170
171
172
173 u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
174 HCLGE_OPC_STATS_32_BIT,
175 HCLGE_OPC_STATS_MAC,
176 HCLGE_OPC_STATS_MAC_ALL,
177 HCLGE_OPC_QUERY_32_BIT_REG,
178 HCLGE_OPC_QUERY_64_BIT_REG};
179 int i;
180
181 for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
182 if (spec_opcode[i] == opcode)
183 return true;
184 }
185
186 return false;
187}
188
189static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
190 int num, int ntc)
191{
192 u16 opcode, desc_ret;
193 int handle;
194 int retval;
195
196 opcode = le16_to_cpu(desc[0].opcode);
197 for (handle = 0; handle < num; handle++) {
198 desc[handle] = hw->cmq.csq.desc[ntc];
199 ntc++;
200 if (ntc >= hw->cmq.csq.desc_num)
201 ntc = 0;
202 }
203 if (likely(!hclge_is_special_opcode(opcode)))
204 desc_ret = le16_to_cpu(desc[num - 1].retval);
205 else
206 desc_ret = le16_to_cpu(desc[0].retval);
207
208 if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
209 retval = 0;
210 else if (desc_ret == HCLGE_CMD_NO_AUTH)
211 retval = -EPERM;
212 else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
213 retval = -EOPNOTSUPP;
214 else
215 retval = -EIO;
216 hw->cmq.last_status = desc_ret;
217
218 return retval;
219}
220
221
222
223
224
225
226
227
228
229
/**
 * hclge_cmd_send - post descriptors to the CSQ and wait for completion
 * @hw: hardware state containing the command queues
 * @desc: caller's array of @num descriptors; on success, completed
 *        descriptors (including firmware write-back data) are copied back
 * @num: number of descriptors in this command
 *
 * Takes the CSQ lock (BH-disabling), copies the descriptors into the ring,
 * rings the tail doorbell, and — for synchronous commands — busy-waits up
 * to tx_timeout microseconds for hardware to consume them.
 *
 * Returns 0 on success, -EBUSY when the ring is full or commands are
 * disabled, -EAGAIN on timeout, or a negative errno from the firmware
 * return code / ring cleanup.
 */
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int retval = 0;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (num > hclge_ring_space(&hw->cmq.csq) ||
	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Remember where this command starts in the ring; hardware writes
	 * its response back into these same slots.
	 */
	ntc = hw->cmq.csq.next_to_use;
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* ring the doorbell: tell hardware how far the ring is filled */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/* For a synchronous command (flag checked on the first descriptor),
	 * poll until hardware has consumed everything or we time out.
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				complete = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!complete) {
		retval = -EAGAIN;
	} else {
		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
	}

	/* reclaim the ring slots regardless of the command outcome */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		retval = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}
298
299static enum hclge_cmd_status hclge_cmd_query_firmware_version(
300 struct hclge_hw *hw, u32 *version)
301{
302 struct hclge_query_version_cmd *resp;
303 struct hclge_desc desc;
304 int ret;
305
306 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
307 resp = (struct hclge_query_version_cmd *)desc.data;
308
309 ret = hclge_cmd_send(hw, &desc, 1);
310 if (!ret)
311 *version = le32_to_cpu(resp->firmware);
312
313 return ret;
314}
315
316int hclge_cmd_queue_init(struct hclge_dev *hdev)
317{
318 int ret;
319
320
321 spin_lock_init(&hdev->hw.cmq.csq.lock);
322 spin_lock_init(&hdev->hw.cmq.crq.lock);
323
324
325 hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
326 hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
327
328
329 hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
330
331
332 ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
333 if (ret) {
334 dev_err(&hdev->pdev->dev,
335 "CSQ ring setup error %d\n", ret);
336 return ret;
337 }
338
339 ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
340 if (ret) {
341 dev_err(&hdev->pdev->dev,
342 "CRQ ring setup error %d\n", ret);
343 goto err_csq;
344 }
345
346 return 0;
347err_csq:
348 hclge_free_cmd_desc(&hdev->hw.cmq.csq);
349 return ret;
350}
351
/**
 * hclge_cmd_init - (re)initialize the command queues and query firmware
 * @hdev: device instance
 *
 * Resets both rings' software indices and reprograms the hardware
 * registers under both ring locks, re-enables command submission, then
 * verifies the link to the IMP by querying the firmware version.
 *
 * Returns 0 on success; -EBUSY if a reset became pending before the first
 * command could be issued, or the error from the version query. On any
 * failure commands are disabled again.
 */
int hclge_cmd_init(struct hclge_dev *hdev)
{
	u32 version;
	int ret;

	/* lock order: CSQ (BH-disabling) then CRQ — matches uninit */
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* If a reset is pending, commands would race the reset path;
	 * bail out before talking to firmware.
	 */
	if ((hclge_is_reset_pending(hdev))) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"firmware version query failed %d\n", ret);
		goto err_cmd_init;
	}
	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;

err_cmd_init:
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}
397
/* Zero out every CSQ/CRQ hardware register, detaching both rings from
 * hardware before their memory is freed.
 */
static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
{
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
411
/* Free one ring's descriptor memory while holding the ring lock. */
static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
	spin_lock(&ring->lock);
	hclge_free_cmd_desc(ring);
	spin_unlock(&ring->lock);
}
418
/* Tear down both command-queue rings (CSQ then CRQ). */
static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
	hclge_destroy_queue(&hw->cmq.csq);
	hclge_destroy_queue(&hw->cmq.crq);
}
424
/* Shut the command interface down: disable further commands and clear the
 * hardware registers under both ring locks (same order as hclge_cmd_init),
 * then release the rings' descriptor memory.
 */
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hclge_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclge_destroy_cmd_queue(&hdev->hw);
}
436