/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2020 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"

/* derive a pseudo-unique sentinel from the context address and request type */
static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
{
	return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
}

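/**
 * __hwrm_req_init() - Initialize an HWRM request.
 * @bp: The driver context.
 * @req: A pointer to the request pointer.
 * @req_type: The request type.
 * @req_len: The length of the request to be allocated.
 *
 * Allocate one DMA-coherent buffer holding the request, the response and a
 * hidden context structure, initialize the common request header fields
 * (req_type, resp_addr, cmpl_ring, target_id) and return the request to the
 * caller via @req. The request is consumed by hwrm_req_send() unless
 * ownership is first taken with hwrm_req_hold().
 *
 * Return: 0 on success, -E2BIG if @req_len exceeds the space reserved for
 * requests in the DMA buffer, or -ENOMEM if the allocation fails.
 */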
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
{
	struct bnxt_hwrm_ctx *ctx;
	dma_addr_t dma_handle;
	u8 *req_addr;

	if (req_len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
				  &dma_handle);
	if (!req_addr)
		return -ENOMEM;

	ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);
	/* safety first, sentinel before populating the rest of the context */
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
	ctx->req_len = req_len;
	ctx->req = (struct input *)req_addr;
	ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
	ctx->dma_handle = dma_handle;
	ctx->flags = 0;	/* __GFP_ZERO, but be explicit regarding ownership */
	ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	ctx->gfp = GFP_KERNEL;
	ctx->slice_addr = NULL;

	/* initialize common request fields */
	ctx->req->req_type = cpu_to_le16(req_type);
	ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
	ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
	ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
	*req = ctx->req;

	return 0;
}

static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
{
	void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
	struct input *req = (struct input *)req_addr;
	struct bnxt_hwrm_ctx *ctx = ctx_addr;
	u64 sentinel;

	if (!req) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "null HWRM request");
		dump_stack();
		return NULL;
	}

	/* HWRM API has no type safety, verify sentinel to validate address */
	sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
	if (ctx->sentinel != sentinel) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
			   (u32)le16_to_cpu(req->req_type));
		dump_stack();
		return NULL;
	}

	return ctx;
}

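/**
 * hwrm_req_timeout() - Set the completion wait timeout for the request.
 * @bp: The driver context.
 * @req: The request to set the timeout for.
 * @timeout: The timeout in milliseconds.
 *
 * Overrides the default timeout used by hwrm_req_send() while waiting for
 * the firmware to respond to this request.
 */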
void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->timeout = timeout;
}

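/**
 * hwrm_req_alloc_flags() - Set the GFP flags used for slice allocations.
 * @bp: The driver context.
 * @req: The request whose slice allocations are affected.
 * @gfp: A bitmask of GFP flags. These are passed to dma_alloc_coherent()
 *	whenever hwrm_req_dma_slice() cannot suballocate from the request's
 *	own DMA buffer.
 */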
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->gfp = gfp;
}

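/**
 * hwrm_req_replace() - Replace the request with caller-provided data.
 * @bp: The driver context.
 * @req: The request to modify.
 * @new_req: The pre-built request to send in place of @req.
 * @len: The length of @new_req.
 *
 * In short command mode, or when @new_req is oversized, the new data is
 * copied into the internal DMA buffer; otherwise @new_req is referenced in
 * place and only the common header fields are adjusted. Any existing DMA
 * slice is freed and the sentinel is recomputed for the (potentially new)
 * request type.
 *
 * Return: 0 on success, -EINVAL if @req is not a valid request, or -E2BIG
 * if @len exceeds the space reserved for requests in the DMA buffer.
 */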
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *internal_req = req;
	u16 req_type;

	if (!ctx)
		return -EINVAL;

	if (len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	/* free any existing slices */
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	if (ctx->slice_addr) {
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);
		ctx->slice_addr = NULL;
	}
	ctx->gfp = GFP_KERNEL;

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
		memcpy(internal_req, new_req, len);
	} else {
		internal_req->req_type = ((struct input *)new_req)->req_type;
		ctx->req = new_req;
	}

	ctx->req_len = len;
	ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
					  BNXT_HWRM_RESP_OFFSET);

	/* update sentinel for potentially new request type */
	req_type = le16_to_cpu(internal_req->req_type);
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);

	return 0;
}

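/**
 * hwrm_req_flags() - Set non-default flags on the request.
 * @bp: The driver context.
 * @req: The request containing the HWRM command.
 * @flags: The flags to set.
 *
 * Only flags covered by the HWRM_API_FLAGS mask are honored; internal
 * context flags cannot be set through this interface. For example,
 * BNXT_HWRM_CTX_SILENT suppresses error logging for the request.
 */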
void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->flags |= (flags & HWRM_API_FLAGS);
}

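/**
 * hwrm_req_hold() - Claim ownership of the request's resources.
 * @bp: The driver context.
 * @req: A pointer to the request to own. The request will no longer be
 *	consumed by calls to hwrm_req_send().
 *
 * Take ownership of the request and its associated response buffer so that
 * they survive hwrm_req_send(); the caller can then inspect the response
 * and must eventually release the resources with hwrm_req_drop().
 *
 * Illustrative call sequence (assuming the hwrm_req_init() convenience
 * wrapper declared in bnxt_hwrm.h):
 *
 *	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
 *	if (rc)
 *		return rc;
 *	resp = hwrm_req_hold(bp, req);
 *	rc = hwrm_req_send(bp, req);
 *	if (!rc)
 *		... read fields of resp ...
 *	hwrm_req_drop(bp, req);
 *
 * Return: A pointer to the response buffer, or NULL if the request is
 * invalid or already owned.
 */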
void *hwrm_req_hold(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *input = (struct input *)req;

	if (!ctx)
		return NULL;

	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
	return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
}

static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
	dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */

	/* unmap any auxiliary DMA slice */
	if (ctx->slice_addr)
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);

	/* invalidate, ensure ownership, sentinel and dma_handle are cleared */
	memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));

	/* return the buffer to the DMA pool */
	if (dma_handle)
		dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
}

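/**
 * hwrm_req_drop() - Release all resources associated with the request.
 * @bp: The driver context.
 * @req: The request to consume, revoking the caller's ownership.
 *
 * Frees any DMA slice, invalidates the context and returns the request
 * buffer to the DMA pool. Must be called once for each successful call to
 * hwrm_req_hold(), and for initialized requests that are never sent.
 */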
void hwrm_req_drop(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		__hwrm_ctx_drop(bp, ctx);
}

/* map firmware HWRM error codes onto standard errno values */
static int __hwrm_to_stderr(u32 hwrm_err)
{
	switch (hwrm_err) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_RESOURCE_LOCKED:
		return -EROFS;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
		return -ENOSPC;
	case HWRM_ERR_CODE_INVALID_PARAMS:
	case HWRM_ERR_CODE_INVALID_FLAGS:
	case HWRM_ERR_CODE_INVALID_ENABLES:
	case HWRM_ERR_CODE_UNSUPPORTED_TLV:
	case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
		return -EINVAL;
	case HWRM_ERR_CODE_NO_BUFFER:
		return -ENOMEM;
	case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
	case HWRM_ERR_CODE_BUSY:
		return -EAGAIN;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}

static struct bnxt_hwrm_wait_token *
__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
	__acquires(&bp->hwrm_cmd_lock)
{
	struct bnxt_hwrm_wait_token *token;

	token = kzalloc(sizeof(*token), GFP_KERNEL);
	if (!token)
		return NULL;

	mutex_lock(&bp->hwrm_cmd_lock);

	token->dst = dst;
	token->state = BNXT_HWRM_PENDING;
	if (dst == BNXT_HWRM_CHNL_CHIMP) {
		token->seq_id = bp->hwrm_cmd_seq++;
		hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
	} else {
		token->seq_id = bp->hwrm_cmd_kong_seq++;
	}

	return token;
}

static void
__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token)
	__releases(&bp->hwrm_cmd_lock)
{
	if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
		hlist_del_rcu(&token->node);
		kfree_rcu(token, rcu);
	} else {
		kfree(token);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

/* advance the state of the pending command matching @seq_id */
void
hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
{
	struct bnxt_hwrm_wait_token *token;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
		if (token->seq_id == seq_id) {
			WRITE_ONCE(token->state, state);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}

static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
	enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
	struct bnxt_hwrm_wait_token *token = NULL;
	struct hwrm_short_input short_input = {0};
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
	unsigned int i, timeout, tmo_count;
	u32 *data = (u32 *)ctx->req;
	u32 msg_len = ctx->req_len;
	int rc = -EBUSY;
	u32 req_type;
	u16 len = 0;
	u8 *valid;

	if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
		memset(ctx->resp, 0, PAGE_SIZE);

	req_type = le16_to_cpu(ctx->req->req_type);
	if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET)
		goto exit;

	if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
	    msg_len > bp->hwrm_max_ext_req_len) {
		rc = -E2BIG;
		goto exit;
	}

	if (bnxt_kong_hwrm_message(bp, ctx->req)) {
		dst = BNXT_HWRM_CHNL_KONG;
		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
		if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
				   req_type);
			rc = -EINVAL;
			goto exit;
		}
	}

	token = __hwrm_acquire_token(bp, dst);
	if (!token) {
		rc = -ENOMEM;
		goto exit;
	}
	ctx->req->seq_id = cpu_to_le16(token->seq_id);

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
		short_input.req_type = ctx->req->req_type;
		short_input.signature =
			cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr = cpu_to_le64(ctx->dma_handle);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Ensure any associated DMA buffers are written before doorbell */
	wmb();

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);

	/* zero pad the unused portion of the channel */
	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + bar_offset + i);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + doorbell_offset);

	if (!pci_is_enabled(bp->pdev)) {
		rc = -ENODEV;
		goto exit;
	}

	/* Limit timeout to an upper limit */
	timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT);
	/* convert timeout to usec */
	timeout *= 1000;

	i = 0;
	/* Short timeout for the first few iterations:
	 * number of loops = number of loops for short timeout +
	 * number of loops for the remaining period of timeout
	 */
	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);

	if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
		       i++ < tmo_count) {
			/* Abort the wait for completion if the FW health
			 * check has failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;
			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (HWRM_WAIT_MUST_ABORT(bp, ctx))
					break;
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
			if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
				netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
					   le16_to_cpu(ctx->req->req_type));
			goto exit;
		}
		len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
		valid = ((u8 *)ctx->resp) + len - 1;
	} else {
		__le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
		int j;

		/* Check if response len is updated */
		for (i = 0; i < tmo_count; i++) {
			/* Abort the wait for completion if the FW health
			 * check has failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;

			if (token &&
			    READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
				__hwrm_release_token(bp, token);
				token = NULL;
			}

			len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
			if (len) {
				__le16 resp_seq = READ_ONCE(ctx->resp->seq_id);

				if (resp_seq == ctx->req->seq_id)
					break;
				if (resp_seq != seen_out_of_seq) {
					netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
						    le16_to_cpu(resp_seq),
						    le16_to_cpu(ctx->req->req_type),
						    le16_to_cpu(ctx->req->seq_id));
					seen_out_of_seq = resp_seq;
				}
			}

			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (HWRM_WAIT_MUST_ABORT(bp, ctx))
					goto timeout_abort;
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (i >= tmo_count) {
timeout_abort:
			if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
				netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
					   hwrm_total_timeout(i),
					   le16_to_cpu(ctx->req->req_type),
					   le16_to_cpu(ctx->req->seq_id), len);
			goto exit;
		}

		/* Last byte of resp contains valid bit */
		valid = ((u8 *)ctx->resp) + len - 1;
		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
			/* make sure we read from updated DMA memory */
			dma_rmb();
			if (*valid)
				break;
			usleep_range(1, 5);
		}

		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
			if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
				netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
					   hwrm_total_timeout(i),
					   le16_to_cpu(ctx->req->req_type),
					   le16_to_cpu(ctx->req->seq_id), len,
					   *valid);
			goto exit;
		}
	}

	/* Zero valid bit for compatibility.  Valid bit in an older spec
	 * may become a new field in a newer spec.  We must make sure that
	 * a new field not implemented by old spec will read zero.
	 */
	*valid = 0;
	rc = le16_to_cpu(ctx->resp->error_code);
	if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) {
		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			   le16_to_cpu(ctx->resp->req_type),
			   le16_to_cpu(ctx->resp->seq_id), rc);
	}
	rc = __hwrm_to_stderr(rc);
exit:
	if (token)
		__hwrm_release_token(bp, token);
	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
		ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
	else
		__hwrm_ctx_drop(bp, ctx);
	return rc;
}

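/**
 * hwrm_req_send() - Execute an HWRM command.
 * @bp: The driver context.
 * @req: A pointer to the request to send. The DMA resources associated with
 *	the request will be released (consumed) unless ownership of the
 *	request has been assumed by the caller via a call to hwrm_req_hold().
 *
 * Send an HWRM request to the firmware and wait for the response.
 *
 * Return: A standard errno; firmware status codes are translated by
 * __hwrm_to_stderr() (e.g. HWRM_ERR_CODE_BUSY becomes -EAGAIN).
 */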
int hwrm_req_send(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (!ctx)
		return -EINVAL;

	return __hwrm_send(bp, ctx);
}

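/**
 * hwrm_req_send_silent() - A silent version of hwrm_req_send().
 * @bp: The driver context.
 * @req: The request to send, consumed unless owned.
 *
 * Same as hwrm_req_send(), except that the request is not logged on error.
 * Useful for probing commands where failures are expected and benign.
 */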
int hwrm_req_send_silent(struct bnxt *bp, void *req)
{
	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	return hwrm_req_send(bp, req);
}

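/**
 * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
 * @bp: The driver context.
 * @req: The request with which indirect data will be associated.
 * @size: The size of the allocation.
 * @dma_handle: Receives the bus address of the allocation.
 *
 * Sub-allocates aligned storage from the unused tail of the request's own
 * DMA buffer when possible; otherwise falls back to dma_alloc_coherent()
 * using the GFP flags set by hwrm_req_alloc_flags(). At most one external
 * mapping may exist per request; it is freed automatically when the
 * request is consumed or dropped.
 *
 * Return: A pointer to the slice, or NULL on failure.
 */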
void *
hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
	struct input *input = req;
	u8 *addr, *req_addr = req;
	u32 max_offset, offset;

	if (!ctx)
		return NULL;

	/* try to suballocate from the tail of the request's own DMA buffer */
	max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
	offset = max_offset - size;
	offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
	addr = req_addr + offset;

	if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
		ctx->allocated = end - addr;
		*dma_handle = ctx->dma_handle + offset;
		return addr;
	}

	/* could not suballocate from ctx buffer, try create a new mapping */
	if (ctx->slice_addr) {
		/* if one exists, can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);
	if (!addr)
		return NULL;

	ctx->slice_addr = addr;
	ctx->slice_size = size;
	ctx->slice_handle = *dma_handle;

	return addr;
}