#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS	= 2,
	NUM_MED_LISTS	= 64,
	LONG_LIST_SIZE	= (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
			  MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	= 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}

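/* Tokens cycle through 1..255; zero is skipped so a zeroed-out
 * descriptor can never match a live token.
 */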
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

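/* XOR-fold a buffer into a single byte; the signature bytes are chosen
 * so that XOR-ing the whole protected area yields 0xff.
 */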
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

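/* Busy-wait (with sleeps) for FW to hand the descriptor back to SW,
 * allowing one extra second of grace beyond the command timeout.
 */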
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

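/* Synthetic status/syndrome reported when the driver, rather than
 * firmware, completes a command (internal error or offline PCI channel).
 */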
enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETTACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
		return "QUERY_HCA_VPORT_CONTEXT";

	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
		return "MODIFY_HCA_VPORT_CONTEXT";

	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";

	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";

	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";

	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";

	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";

	case MLX5_CMD_OP_ENABLE_HCA:
		return "ENABLE_HCA";

	case MLX5_CMD_OP_DISABLE_HCA:
		return "DISABLE_HCA";

	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";

	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";

	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";

	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";

	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";

	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";

	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";

	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";

	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";

	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";

	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";

	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";

	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";

	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";

	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";

	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";

	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";

	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";

	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";

	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";

	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";

	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";

	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";

	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";

	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";

	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";

	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";

	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";

	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";

	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";

	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return "CREATE_XRC_SRQ";

	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
		return "DESTROY_XRC_SRQ";

	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		return "QUERY_XRC_SRQ";

	case MLX5_CMD_OP_ARM_XRC_SRQ:
		return "ARM_XRC_SRQ";

	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";

	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";

	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";

	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";

	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";

	case MLX5_CMD_OP_DETTACH_FROM_MCG:
		return "DETTACH_FROM_MCG";

	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";

	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";

	case MLX5_CMD_OP_ACCESS_REG:
		return "ACCESS_REG";

	case MLX5_CMD_OP_SET_WOL_ROL:
		return "SET_WOL_ROL";

	case MLX5_CMD_OP_QUERY_WOL_ROL:
		return "QUERY_WOL_ROL";

	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		return "ADD_VXLAN_UDP_DPORT";

	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
		return "DELETE_VXLAN_UDP_DPORT";

	default: return "unknown command opcode";
	}
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

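	/* ring doorbell after the descriptor is valid */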
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();

	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
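		/* make sure we read the descriptor after ownership is SW */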
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = 0;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->syndrome;
}

static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->status;
}

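/*  Notes:
 *    1. Callback functions may not sleep
 *    2. page queue commands may not be used
 */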
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		ds = ent->ts2 - ent->ts1;
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
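			/* ran out of mailbox blocks: message was sized
			 * too small for the data
			 */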
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
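			/* message chain shorter than the requested copy */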
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[count] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err != 1)
		return -EINVAL;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

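/* Switch to event-driven completions. Taking every regular-command
 * semaphore plus the pages semaphore guarantees no command is in
 * flight while the mode changes.
 */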
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

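	/* there can be at most 32 command queues */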
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0;
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
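			/* For cached lists, we must explicitly state what is
			 * the real size
			 */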
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode);
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
		*get_synd_ptr(out) = cpu_to_be32(drv_synd);
		*get_status_ptr(out) = status;
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

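/* The command queue must be naturally aligned to MLX5_ADAPTER_PAGE_SIZE.
 * If the coherent allocation happens to come back unaligned, retry with
 * an over-allocation and align the pointer by hand.
 */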
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

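	/* make sure it is aligned to 4K */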
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

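	/* Make sure firmware sees the complete address before we proceed */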
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}

int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8  status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}