linux/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2020 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"

static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
{
        return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
}
  32
  33/**
  34 * __hwrm_req_init() - Initialize an HWRM request.
  35 * @bp: The driver context.
  36 * @req: A pointer to the request pointer to initialize.
  37 * @req_type: The request type. This will be converted to the little endian
  38 *      before being written to the req_type field of the returned request.
  39 * @req_len: The length of the request to be allocated.
  40 *
  41 * Allocate DMA resources and initialize a new HWRM request object of the
  42 * given type. The response address field in the request is configured with
  43 * the DMA bus address that has been mapped for the response and the passed
  44 * request is pointed to kernel virtual memory mapped for the request (such
  45 * that short_input indirection can be accomplished without copying). The
  46 * request’s target and completion ring are initialized to default values and
  47 * can be overridden by writing to the returned request object directly.
  48 *
  49 * The initialized request can be further customized by writing to its fields
  50 * directly, taking care to covert such fields to little endian. The request
  51 * object will be consumed (and all its associated resources release) upon
  52 * passing it to hwrm_req_send() unless ownership of the request has been
  53 * claimed by the caller via a call to hwrm_req_hold(). If the request is not
  54 * consumed, either because it is never sent or because ownership has been
  55 * claimed, then it must be released by a call to hwrm_req_drop().
  56 *
  57 * Return: zero on success, negative error code otherwise:
  58 *      E2BIG: the type of request pointer is too large to fit.
  59 *      ENOMEM: an allocation failure occurred.
  60 */
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
{
        struct bnxt_hwrm_ctx *ctx;
        dma_addr_t dma_handle;
        u8 *req_addr;

        if (req_len > BNXT_HWRM_CTX_OFFSET)
                return -E2BIG;

        req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
                                  &dma_handle);
        if (!req_addr)
                return -ENOMEM;

        ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);
        /* safety first, sentinel used to check for invalid requests */
        ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
        ctx->req_len = req_len;
        ctx->req = (struct input *)req_addr;
        ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
        ctx->dma_handle = dma_handle;
        ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
        ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
        ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
        ctx->gfp = GFP_KERNEL;
        ctx->slice_addr = NULL;

        /* initialize common request fields */
        ctx->req->req_type = cpu_to_le16(req_type);
        ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
        ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
        ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
        *req = ctx->req;

        return 0;
}
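
/* Usage sketch (illustrative only, not part of the driver): the typical
 * request lifecycle using the hwrm_req_init() wrapper macro from
 * bnxt_hwrm.h, which derives req_len from the pointer type. The request
 * type, input structure and reset-level constant are definitions from
 * bnxt_hsi.h; error handling is abbreviated.
 *
 *        struct hwrm_func_reset_input *req;
 *        int rc;
 *
 *        rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
 *        if (rc)
 *                return rc;
 *        req->func_reset_level =
 *                FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL;
 *        return hwrm_req_send(bp, req);  // consumes req and its resources
 */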

static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
{
        void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
        struct input *req = (struct input *)req_addr;
        struct bnxt_hwrm_ctx *ctx = ctx_addr;
        u64 sentinel;

        if (!req) {
                /* can only be due to software bug, be loud */
                netdev_err(bp->dev, "null HWRM request");
                dump_stack();
                return NULL;
        }

        /* HWRM API has no type safety, verify sentinel to validate address */
        sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
        if (ctx->sentinel != sentinel) {
                /* can only be due to software bug, be loud */
                netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
                           (u32)le16_to_cpu(req->req_type));
                dump_stack();
                return NULL;
        }

        return ctx;
}

/**
 * hwrm_req_timeout() - Set the completion timeout for the request.
 * @bp: The driver context.
 * @req: The request for which to set the timeout.
 * @timeout: The timeout in milliseconds.
 *
 * Set the timeout associated with the request for subsequent calls to
 * hwrm_req_send(). Some requests are long running and require a different
 * timeout than the default.
 */
void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
{
        struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

        if (ctx)
                ctx->timeout = timeout;
}
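
/* Illustrative sketch: raising the timeout for a long-running command such
 * as NVM flashing. The 100000 msec literal is an assumption for
 * illustration; real callers choose a value suited to the command.
 *
 *        struct hwrm_nvm_write_input *req;
 *        int rc;
 *
 *        rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
 *        if (rc)
 *                return rc;
 *        hwrm_req_timeout(bp, req, 100000);      // milliseconds
 *        rc = hwrm_req_send(bp, req);
 */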

/**
 * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices.
 * @bp: The driver context.
 * @req: The request for which calls to hwrm_req_dma_slice() will have altered
 *      allocation flags.
 * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
 *      whenever it is used to allocate backing memory for slices. Note that
 *      calls to hwrm_req_dma_slice() will not always result in new
 *      allocations; however, memory suballocated from the request buffer is
 *      already __GFP_ZERO.
 *
 * Sets the GFP allocation flags associated with the request for subsequent
 * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
 * for slice allocations.
 */
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
{
        struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

        if (ctx)
                ctx->gfp = gfp;
}
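
/* Illustrative sketch: requesting zeroed backing memory for a large slice.
 * GFP_KERNEL | __GFP_ZERO mirrors what NVM-related callers in the driver
 * pass; buf, buf_len and dma_handle are assumed locals.
 *
 *        hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO);
 *        buf = hwrm_req_dma_slice(bp, req, buf_len, &dma_handle);
 */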

/**
 * hwrm_req_replace() - Replace request data.
 * @bp: The driver context.
 * @req: The request to modify. A call to hwrm_req_replace() is conceptually
 *      an assignment of new_req to req. Subsequent calls to HWRM API functions,
 *      such as hwrm_req_send(), should thus use req and not new_req (in fact,
 *      calls to HWRM API functions will fail if non-managed request objects
 *      are passed).
 * @new_req: The pre-built request to copy or reference.
 * @len: The length of new_req.
 *
 * Replaces the request data in req with that of new_req. This is useful in
 * scenarios where a request object has already been constructed by a third
 * party prior to creating a resource-managed request using hwrm_req_init().
 * Depending on the length, hwrm_req_replace() will either copy the new
 * request data into the DMA memory allocated for req, or it will simply
 * reference the new request and use it in lieu of req during subsequent
 * calls to hwrm_req_send(). The resource management is associated with
 * req and is independent of and does not apply to new_req. The caller must
 * ensure that the lifetime of new_req is at least as long as that of req.
 * Any slices that may have been associated with the original request are
 * released.
 *
 * Return: zero on success, negative error code otherwise:
 *     E2BIG: Request is too large.
 *     EINVAL: Invalid request to modify.
 */
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
{
        struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
        struct input *internal_req = req;
        u16 req_type;

        if (!ctx)
                return -EINVAL;

        if (len > BNXT_HWRM_CTX_OFFSET)
                return -E2BIG;

        /* free any existing slices */
        ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
        if (ctx->slice_addr) {
                dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
                                  ctx->slice_addr, ctx->slice_handle);
                ctx->slice_addr = NULL;
        }
        ctx->gfp = GFP_KERNEL;

        if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
                memcpy(internal_req, new_req, len);
        } else {
                internal_req->req_type = ((struct input *)new_req)->req_type;
                ctx->req = new_req;
        }

        ctx->req_len = len;
        ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
                                          BNXT_HWRM_RESP_OFFSET);

        /* update sentinel for potentially new request type */
        req_type = le16_to_cpu(internal_req->req_type);
        ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);

        return 0;
}
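
/* Illustrative sketch (modeled on bnxt_send_msg() in bnxt_ulp.c): wrapping a
 * message pre-built by another layer in a managed request. The fw_msg
 * names are assumptions for illustration.
 *
 *        struct input *req;
 *        int rc;
 *
 *        rc = hwrm_req_init(bp, req, 0);    // req_type fixed up by replace
 *        if (rc)
 *                return rc;
 *        rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
 *        if (rc) {
 *                hwrm_req_drop(bp, req);    // release the aborted request
 *                return rc;
 *        }
 *        return hwrm_req_send(bp, req);     // consumes req, success or not
 */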

/**
 * hwrm_req_flags() - Set non-internal flags of the ctx.
 * @bp: The driver context.
 * @req: The request containing the HWRM command.
 * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set.
 *
 * ctx flags can be used by the callers to instruct how the subsequent
 * hwrm_req_send() should behave. For example, callers can use hwrm_req_flags()
 * with BNXT_HWRM_CTX_SILENT to omit kernel prints of errors from
 * hwrm_req_send(), or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to
 * wait for the full timeout even if FW is not responding.
 * This generic function can be used to set any flag that is not an internal
 * flag of the HWRM module.
 */
void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
{
        struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

        if (ctx)
                ctx->flags |= (flags & HWRM_API_FLAGS);
}
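
/* Illustrative sketch: forcing the full timeout wait across a firmware
 * reset, in the style of the driver's if_change handling; error handling
 * is abbreviated.
 *
 *        rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
 *        if (rc)
 *                return rc;
 *        hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
 *        rc = hwrm_req_send(bp, req);
 */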

/**
 * hwrm_req_hold() - Claim ownership of the request's resources.
 * @bp: The driver context.
 * @req: A pointer to the request to own. The request will no longer be
 *      consumed by calls to hwrm_req_send().
 *
 * Take ownership of the request. Ownership places responsibility on the
 * caller to free the resources associated with the request via a call to
 * hwrm_req_drop(). The caller taking ownership implies that a subsequent
 * call to hwrm_req_send() will not consume the request (ie. sending will
 * not free the associated resources if the request is owned by the caller).
 * Taking ownership returns a reference to the response. Retaining and
 * accessing the response data is the most common reason to take ownership
 * of the request. Ownership can also be acquired in order to reuse the same
 * request object across multiple invocations of hwrm_req_send().
 *
 * Return: A pointer to the response object.
 *
 * The resources associated with the response will remain available to the
 * caller until ownership of the request is relinquished via a call to
 * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if
 * a valid request is provided. A returned NULL value would imply a driver
 * bug and the implementation will complain loudly in the logs to aid in
 * detection. It should not be necessary to check the result for NULL.
 */
void *hwrm_req_hold(struct bnxt *bp, void *req)
{
        struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
        struct input *input = (struct input *)req;

        if (!ctx)
                return NULL;

        if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
                /* can only be due to software bug, be loud */
                netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
                           (u32)le16_to_cpu(input->req_type));
                dump_stack();
                return NULL;
        }

        ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
        return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
}
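
/* Illustrative sketch (modeled on query-style callers such as
 * bnxt_hwrm_func_qcaps()): hold the request so the response remains valid
 * after sending. Structure and field names are from bnxt_hsi.h.
 *
 *        struct hwrm_func_qcaps_output *resp;
 *        struct hwrm_func_qcaps_input *req;
 *        u16 max_tx;
 *        int rc;
 *
 *        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
 *        if (rc)
 *                return rc;
 *        req->fid = cpu_to_le16(0xffff);
 *        resp = hwrm_req_hold(bp, req);       // req now owned by caller
 *        rc = hwrm_req_send(bp, req);         // does not consume req
 *        if (!rc)
 *                max_tx = le16_to_cpu(resp->max_tx_rings);
 *        hwrm_req_drop(bp, req);              // caller must release
 *        return rc;
 */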

static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
        void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
        dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */

        /* unmap any auxiliary DMA slice */
        if (ctx->slice_addr)
                dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
                                  ctx->slice_addr, ctx->slice_handle);

        /* invalidate, ensure ownership, sentinel and dma_handle are cleared */
        memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));

        /* return the buffer to the DMA pool */
        if (dma_handle)
                dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
}

/**
 * hwrm_req_drop() - Release all resources associated with the request.
 * @bp: The driver context.
 * @req: The request to consume, releasing the associated resources. The
 *      request object, any slices, and its associated response are no
 *      longer valid.
 *
 * It is legal to call hwrm_req_drop() on an unowned request, provided it
 * has not already been consumed by hwrm_req_send() (for example, to release
 * an aborted request). A given request should not be dropped more than once,
 * nor should it be dropped after having been consumed by hwrm_req_send(). To
 * do so is an error (the context will not be found and a stack trace will be
 * rendered in the kernel log).
 */
void hwrm_req_drop(struct bnxt *bp, void *req)
{
        struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

        if (ctx)
                __hwrm_ctx_drop(bp, ctx);
}
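
/* Illustrative sketch: releasing a request that was initialized but never
 * sent. The some_precondition() helper is a hypothetical placeholder.
 *
 *        rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
 *        if (rc)
 *                return rc;
 *        if (!some_precondition(bp)) {
 *                hwrm_req_drop(bp, req);      // abort without sending
 *                return -EOPNOTSUPP;
 *        }
 *        return hwrm_req_send(bp, req);
 */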

static int __hwrm_to_stderr(u32 hwrm_err)
{
        switch (hwrm_err) {
        case HWRM_ERR_CODE_SUCCESS:
                return 0;
        case HWRM_ERR_CODE_RESOURCE_LOCKED:
                return -EROFS;
        case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
                return -EACCES;
        case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
                return -ENOSPC;
        case HWRM_ERR_CODE_INVALID_PARAMS:
        case HWRM_ERR_CODE_INVALID_FLAGS:
        case HWRM_ERR_CODE_INVALID_ENABLES:
        case HWRM_ERR_CODE_UNSUPPORTED_TLV:
        case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
                return -EINVAL;
        case HWRM_ERR_CODE_NO_BUFFER:
                return -ENOMEM;
        case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
        case HWRM_ERR_CODE_BUSY:
                return -EAGAIN;
        case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        default:
                return -EIO;
        }
}

static struct bnxt_hwrm_wait_token *
__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
{
        struct bnxt_hwrm_wait_token *token;

        token = kzalloc(sizeof(*token), GFP_KERNEL);
        if (!token)
                return NULL;

        mutex_lock(&bp->hwrm_cmd_lock);

        token->dst = dst;
        token->state = BNXT_HWRM_PENDING;
        if (dst == BNXT_HWRM_CHNL_CHIMP) {
                token->seq_id = bp->hwrm_cmd_seq++;
                hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
        } else {
                token->seq_id = bp->hwrm_cmd_kong_seq++;
        }

        return token;
}

static void
__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token)
{
        if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
                hlist_del_rcu(&token->node);
                kfree_rcu(token, rcu);
        } else {
                kfree(token);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
}

void
hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
{
        struct bnxt_hwrm_wait_token *token;

        rcu_read_lock();
        hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
                if (token->seq_id == seq_id) {
                        WRITE_ONCE(token->state, state);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}

static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
        u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
        enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
        u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
        struct bnxt_hwrm_wait_token *token = NULL;
        struct hwrm_short_input short_input = {0};
        u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
        unsigned int i, timeout, tmo_count;
        u32 *data = (u32 *)ctx->req;
        u32 msg_len = ctx->req_len;
        int rc = -EBUSY;
        u32 req_type;
        u16 len = 0;
        u8 *valid;

        if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
                memset(ctx->resp, 0, PAGE_SIZE);

        req_type = le16_to_cpu(ctx->req->req_type);
        if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET)
                goto exit;

        if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
            msg_len > bp->hwrm_max_ext_req_len) {
                rc = -E2BIG;
                goto exit;
        }

        if (bnxt_kong_hwrm_message(bp, ctx->req)) {
                dst = BNXT_HWRM_CHNL_KONG;
                bar_offset = BNXT_GRCPF_REG_KONG_COMM;
                doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
                if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
                        netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
                                   req_type);
                        rc = -EINVAL;
                        goto exit;
                }
        }

        token = __hwrm_acquire_token(bp, dst);
        if (!token) {
                rc = -ENOMEM;
                goto exit;
        }
        ctx->req->seq_id = cpu_to_le16(token->seq_id);

        if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
            msg_len > BNXT_HWRM_MAX_REQ_LEN) {
                short_input.req_type = ctx->req->req_type;
                short_input.signature =
                                cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = cpu_to_le16(msg_len);
                short_input.req_addr = cpu_to_le64(ctx->dma_handle);

                data = (u32 *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Ensure any associated DMA buffers are written before doorbell */
        wmb();

        /* Write request msg to hwrm channel */
        __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);

        for (i = msg_len; i < max_req_len; i += 4)
                writel(0, bp->bar0 + bar_offset + i);

        /* Ring channel doorbell */
        writel(1, bp->bar0 + doorbell_offset);

        if (!pci_is_enabled(bp->pdev)) {
                rc = -ENODEV;
                goto exit;
        }

        /* Limit timeout to an upper limit */
        timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT);
        /* convert timeout to usec */
        timeout *= 1000;

        i = 0;
        /* Short timeout for the first few iterations:
         * number of loops = number of loops for short timeout +
         * number of loops for standard timeout.
         */
        tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
        timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
        tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
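        /* Worked example (assuming the typical header values: a 5-pass
         * short phase at HWRM_SHORT_MIN_TIMEOUT (3 usec), then
         * HWRM_MIN_TIMEOUT (25 usec) per pass): a default 500 msec
         * timeout becomes 500000 usec, less 15 usec of short sleeps,
         * so tmo_count = 5 + DIV_ROUND_UP(499985, 25) = 20005 polls.
         */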

        if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
                /* Wait until hwrm response cmpl interrupt is processed */
                while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
                       i++ < tmo_count) {
                        /* Abort the wait for completion if the FW health
                         * check has failed.
                         */
                        if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
                                goto exit;
                        /* on first few passes, just barely sleep */
                        if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
                                usleep_range(HWRM_SHORT_MIN_TIMEOUT,
                                             HWRM_SHORT_MAX_TIMEOUT);
                        } else {
                                if (HWRM_WAIT_MUST_ABORT(bp, ctx))
                                        break;
                                usleep_range(HWRM_MIN_TIMEOUT,
                                             HWRM_MAX_TIMEOUT);
                        }
                }

                if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
                        if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
                                netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
                                           le16_to_cpu(ctx->req->req_type));
                        goto exit;
                }
                len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
                valid = ((u8 *)ctx->resp) + len - 1;
        } else {
                __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
                int j;

                /* Check if response len is updated */
                for (i = 0; i < tmo_count; i++) {
                        /* Abort the wait for completion if the FW health
                         * check has failed.
                         */
                        if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
                                goto exit;

                        if (token &&
                            READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
                                __hwrm_release_token(bp, token);
                                token = NULL;
                        }

                        len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
                        if (len) {
                                __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);

                                if (resp_seq == ctx->req->seq_id)
                                        break;
                                if (resp_seq != seen_out_of_seq) {
                                        netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
                                                    le16_to_cpu(resp_seq),
                                                    le16_to_cpu(ctx->req->req_type),
                                                    le16_to_cpu(ctx->req->seq_id));
                                        seen_out_of_seq = resp_seq;
                                }
                        }

                        /* on first few passes, just barely sleep */
                        if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
                                usleep_range(HWRM_SHORT_MIN_TIMEOUT,
                                             HWRM_SHORT_MAX_TIMEOUT);
                        } else {
                                if (HWRM_WAIT_MUST_ABORT(bp, ctx))
                                        goto timeout_abort;
                                usleep_range(HWRM_MIN_TIMEOUT,
                                             HWRM_MAX_TIMEOUT);
                        }
                }

                if (i >= tmo_count) {
timeout_abort:
                        if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
                                netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
                                           hwrm_total_timeout(i),
                                           le16_to_cpu(ctx->req->req_type),
                                           le16_to_cpu(ctx->req->seq_id), len);
                        goto exit;
                }

                /* Last byte of resp contains valid bit */
                valid = ((u8 *)ctx->resp) + len - 1;
                for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
                        /* make sure we read from updated DMA memory */
                        dma_rmb();
                        if (*valid)
                                break;
                        usleep_range(1, 5);
                }

                if (j >= HWRM_VALID_BIT_DELAY_USEC) {
                        if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
                                netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
                                           hwrm_total_timeout(i),
                                           le16_to_cpu(ctx->req->req_type),
                                           le16_to_cpu(ctx->req->seq_id), len,
                                           *valid);
                        goto exit;
                }
        }

        /* Zero valid bit for compatibility.  Valid bit in an older spec
         * may become a new field in a newer spec.  We must make sure that
         * a new field not implemented by old spec will read zero.
         */
        *valid = 0;
        rc = le16_to_cpu(ctx->resp->error_code);
        if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) {
                netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
                           le16_to_cpu(ctx->resp->req_type),
                           le16_to_cpu(ctx->resp->seq_id), rc);
        }
        rc = __hwrm_to_stderr(rc);
exit:
        if (token)
                __hwrm_release_token(bp, token);
        if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
                ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
        else
                __hwrm_ctx_drop(bp, ctx);
        return rc;
}

/**
 * hwrm_req_send() - Execute an HWRM command.
 * @bp: The driver context.
 * @req: A pointer to the request to send. The DMA resources associated with
 *      the request will be released (ie. the request will be consumed) unless
 *      ownership of the request has been assumed by the caller via a call to
 *      hwrm_req_hold().
 *
 * Send an HWRM request to the device and wait for a response. The request is
 * consumed if it is not owned by the caller. This function will block until
 * the request has either completed or timed out due to an error.
 *
 * Return: A result code.
 *
 * The result is zero on success, otherwise the negative error code indicates
 * one of the following errors:
 *      E2BIG: The request was too large.
 *      EBUSY: The firmware is in a fatal state or the request timed out.
 *      EACCES: HWRM access denied.
 *      ENOSPC: HWRM resource allocation error.
 *      EINVAL: Request parameters are invalid.
 *      ENOMEM: HWRM has no buffers.
 *      EAGAIN: HWRM busy or reset in progress.
 *      EOPNOTSUPP: Invalid request type.
 *      EIO: Any other error.
 * Error handling is orthogonal to request ownership. An unowned request will
 * still be consumed on error. If the caller owns the request, then the caller
 * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
 * always consume the request.
 */
int hwrm_req_send(struct bnxt *bp, void *req)
{
        struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

        if (!ctx)
                return -EINVAL;

        return __hwrm_send(bp, ctx);
}

/**
 * hwrm_req_send_silent() - A silent version of hwrm_req_send().
 * @bp: The driver context.
 * @req: The request to send without logging.
 *
 * The same as hwrm_req_send(), except that the request is silenced by
 * setting BNXT_HWRM_CTX_SILENT via hwrm_req_flags() prior to the call. This
 * version of the function is provided solely to preserve the legacy API's
 * flavor for this functionality.
 *
 * Return: A result code, see hwrm_req_send().
 */
int hwrm_req_send_silent(struct bnxt *bp, void *req)
{
        hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
        return hwrm_req_send(bp, req);
}

/**
 * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
 * @bp: The driver context.
 * @req: The request for which indirect data will be associated.
 * @size: The size of the allocation.
 * @dma_handle: The bus address associated with the allocation. The HWRM API has
 *      no knowledge about the type of the request and so cannot infer how the
 *      caller intends to use the indirect data. Thus, the caller is
 *      responsible for configuring the request object appropriately to
 *      point to the associated indirect memory. Note, DMA handle has the
 *      same definition as it does in dma_alloc_coherent(); the caller is
 *      responsible for endian conversions via cpu_to_le64() before assigning
 *      this address.
 *
 * Allocates DMA mapped memory for indirect data related to a request. The
 * lifetime of the DMA resources will be bound to that of the request (ie.
 * they will be automatically released when the request is either consumed by
 * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
 * efficiently suballocated out of the request buffer space, hence the name
 * slice, while larger requests are satisfied via an underlying call to
 * dma_alloc_coherent(). Multiple suballocations are supported; however, only
 * one externally mapped region is.
 *
 * Return: The kernel virtual address of the DMA mapping, or NULL on failure.
 */
void *
hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
{
        struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
        u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
        struct input *input = req;
        u8 *addr, *req_addr = req;
        u32 max_offset, offset;

        if (!ctx)
                return NULL;

        max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
        offset = max_offset - size;
        offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
        addr = req_addr + offset;

        if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
                ctx->allocated = end - addr;
                *dma_handle = ctx->dma_handle + offset;
                return addr;
        }

        /* could not suballocate from ctx buffer, try to create a new mapping */
        if (ctx->slice_addr) {
                /* if one exists, can only be due to software bug, be loud */
                netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
                           (u32)le16_to_cpu(input->req_type));
                dump_stack();
                return NULL;
        }

        addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);

        if (!addr)
                return NULL;

        ctx->slice_addr = addr;
        ctx->slice_size = size;
        ctx->slice_handle = *dma_handle;

        return addr;
}

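/* Illustrative sketch (modeled on NVM callers in bnxt_ethtool.c): attach an
 * indirect buffer to a request and keep it valid across the send by
 * holding the request. buf_len and data are assumed locals; the
 * host_dest_addr field follows the HWRM_NVM_GET_DIR_ENTRIES message in
 * bnxt_hsi.h.
 *
 *        struct hwrm_nvm_get_dir_entries_input *req;
 *        dma_addr_t dma_handle;
 *        u8 *buf;
 *        int rc;
 *
 *        rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
 *        if (rc)
 *                return rc;
 *        buf = hwrm_req_dma_slice(bp, req, buf_len, &dma_handle);
 *        if (!buf) {
 *                hwrm_req_drop(bp, req);      // slice failed, abort
 *                return -ENOMEM;
 *        }
 *        req->host_dest_addr = cpu_to_le64(dma_handle);
 *        hwrm_req_hold(bp, req);              // keep buf valid after send
 *        rc = hwrm_req_send(bp, req);
 *        if (!rc)
 *                memcpy(data, buf, buf_len);  // consume indirect data
 *        hwrm_req_drop(bp, req);              // also frees the slice
 *        return rc;
 */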