linux/drivers/soc/fsl/dpio/dpio-service.c
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 *
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
        struct dpaa2_io_desc dpio_desc;
        struct qbman_swp_desc swp_desc;
        struct qbman_swp *swp;
        struct list_head node;
        /* protect against multiple management commands */
        spinlock_t lock_mgmt_cmd;
        /* protect notifications list */
        spinlock_t lock_notifications;
        struct list_head notifications;
        struct device *dev;
};

struct dpaa2_io_store {
        unsigned int max;
        dma_addr_t paddr;
        struct dpaa2_dq *vaddr;
        void *alloced_addr;    /* unaligned value from kmalloc() */
        unsigned int idx;      /* position of the next-to-be-returned entry */
        struct qbman_swp *swp; /* portal used to issue VDQCR */
        struct device *dev;    /* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
                                                     int cpu)
{
        if (d)
                return d;

        if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
                return NULL;

        /*
         * If cpu == -1, choose the current cpu, with no guarantees about
         * potentially being migrated away.
         */
        if (unlikely(cpu < 0))
                cpu = smp_processor_id();

        /* If a specific cpu was requested, pick it up immediately */
        return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
        if (d)
                return d;

        spin_lock(&dpio_list_lock);
        d = list_entry(dpio_list.next, struct dpaa2_io, node);
        list_del(&d->node);
        list_add_tail(&d->node, &dpio_list);
        spin_unlock(&dpio_list_lock);

        return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
        if (cpu == DPAA2_IO_ANY_CPU)
                return service_select(NULL);

        return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
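
/*
 * Usage sketch, not part of the driver: how a consumer might pick a portal.
 * The DPIO calls are real; the helper name and the assumption that the caller
 * runs with migration disabled (so smp_processor_id() is stable) are ours.
 */
#if 0	/* illustrative only */
static struct dpaa2_io *my_get_portal(void)
{
        /* prefer the portal affine to the current cpu... */
        struct dpaa2_io *io = dpaa2_io_service_select(smp_processor_id());

        /* ...else fall back to round-robin over all registered portals */
        if (!io)
                io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
        return io;
}
#endif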

/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
                                 struct device *dev)
{
        struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;

        /* check if CPU is out of range (-1 means any cpu) */
        if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
                kfree(obj);
                return NULL;
        }

        obj->dpio_desc = *desc;
        obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
        obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
        obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
        obj->swp = qbman_swp_init(&obj->swp_desc);

        if (!obj->swp) {
                kfree(obj);
                return NULL;
        }

        INIT_LIST_HEAD(&obj->node);
        spin_lock_init(&obj->lock_mgmt_cmd);
        spin_lock_init(&obj->lock_notifications);
        INIT_LIST_HEAD(&obj->notifications);

        /* For now only enable DQRR interrupts */
        qbman_swp_interrupt_set_trigger(obj->swp,
                                        QBMAN_SWP_INTERRUPT_DQRI);
        qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
        if (obj->dpio_desc.receives_notifications)
                qbman_swp_push_set(obj->swp, 0, 1);

        spin_lock(&dpio_list_lock);
        list_add_tail(&obj->node, &dpio_list);
        if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
                dpio_by_cpu[desc->cpu] = obj;
        spin_unlock(&dpio_list_lock);

        obj->dev = dev;

        return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
        spin_lock(&dpio_list_lock);
        dpio_by_cpu[d->dpio_desc.cpu] = NULL;
        list_del(&d->node);
        spin_unlock(&dpio_list_lock);

        kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
        const struct dpaa2_dq *dq;
        int max = 0;
        struct qbman_swp *swp;
        u32 status;

        swp = obj->swp;
        status = qbman_swp_interrupt_read_status(swp);
        if (!status)
                return IRQ_NONE;

        dq = qbman_swp_dqrr_next(swp);
        while (dq) {
                if (qbman_result_is_SCN(dq)) {
                        struct dpaa2_io_notification_ctx *ctx;
                        u64 q64;

                        q64 = qbman_result_SCN_ctx(dq);
                        ctx = (void *)(uintptr_t)q64;
                        ctx->cb(ctx);
                } else {
                        pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
                }
                qbman_swp_dqrr_consume(swp, dq);
                ++max;
                if (max > DPAA_POLL_MAX)
                        goto done;
                dq = qbman_swp_dqrr_next(swp);
        }
done:
        qbman_swp_interrupt_clear_status(swp, status);
        qbman_swp_interrupt_set_inhibit(swp, 0);
        return IRQ_HANDLED;
}
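
/*
 * Usage sketch, not part of this file: the DPIO platform driver is expected
 * to call dpaa2_io_irq() from its interrupt handler, roughly as below. The
 * handler name is illustrative; the dev_id is assumed to be the dpaa2_io
 * object passed to request_irq().
 */
#if 0	/* illustrative only */
static irqreturn_t my_dpio_irq_handler(int irq, void *arg)
{
        struct dpaa2_io *io = arg;

        return dpaa2_io_irq(io);        /* IRQ_HANDLED or IRQ_NONE */
}
#endif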

/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 *
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
        return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d:   the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully.  In that way:
 *    (a) The DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller)
 *    (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *        'qman64' parameters that it should pass along in the MC command
 *        in order for the object to be configured to produce the right
 *        notification fields to the DPIO service.
 *
 * Return 0 for success, -ENODEV if no DPIO service is available, or -EINVAL
 * if the device link cannot be created.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
                              struct dpaa2_io_notification_ctx *ctx,
                              struct device *dev)
{
        struct device_link *link;
        unsigned long irqflags;

        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (!d)
                return -ENODEV;

        link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
                return -EINVAL;

        ctx->dpio_id = d->dpio_desc.dpio_id;
        ctx->qman64 = (u64)(uintptr_t)ctx;
        ctx->dpio_private = d;
        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_add(&ctx->node, &d->notifications);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);

        /* Enable the generation of CDAN notifications */
        if (ctx->is_cdan)
                return qbman_swp_CDAN_set_context_enable(d->swp,
                                                         (u16)ctx->id,
                                                         ctx->qman64);
        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
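
/*
 * Usage sketch, not part of the driver: registering for FQDAN notifications.
 * MY_FQID, my_fqdan_cb() and my_setup_notifications() are hypothetical; the
 * context fields and the register-then-attach ordering follow the kernel-doc
 * above.
 */
#if 0	/* illustrative only */
static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
        /* schedule NAPI or a worker; pull dequeues can now be issued */
}

static int my_setup_notifications(struct device *dev,
                                  struct dpaa2_io_notification_ctx *ctx)
{
        int err;

        ctx->is_cdan = 0;                       /* FQDAN, i.e. a frame queue */
        ctx->id = MY_FQID;                      /* hypothetical frame queue id */
        ctx->desired_cpu = DPAA2_IO_ANY_CPU;
        ctx->cb = my_fqdan_cb;

        err = dpaa2_io_service_register(NULL, ctx, dev);
        if (err)
                return err;

        /*
         * Only now send the MC "attach" command, quoting ctx->dpio_id and
         * ctx->qman64 so notifications are steered to this portal; undo with
         * dpaa2_io_service_deregister() after the MC "detach".
         */
        return 0;
}
#endif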

/**
 * dpaa2_io_service_deregister - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
                                 struct dpaa2_io_notification_ctx *ctx,
                                 struct device *dev)
{
        struct dpaa2_io *d = ctx->dpio_private;
        unsigned long irqflags;

        if (ctx->is_cdan)
                qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_del(&ctx->node);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);

        if (dev)
                device_link_remove(dev, d->dev);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed", i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is what
 * this function achieves.
 *
 * Return 0 for success, or -ENODEV if no DPIO service is available.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
                           struct dpaa2_io_notification_ctx *ctx)
{
        unsigned long irqflags;
        int err;

        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (unlikely(!d))
                return -ENODEV;

        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        if (ctx->is_cdan)
                err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
        else
                err = qbman_swp_fq_schedule(d->swp, ctx->id);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
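
/*
 * Usage sketch, not part of the driver: once a notification callback has
 * drained its source, it rearms it so the next FQDAN/CDAN can be produced.
 * The function name is illustrative.
 */
#if 0	/* illustrative only */
static void my_dequeue_done(struct dpaa2_io_notification_ctx *ctx)
{
        int err = dpaa2_io_service_rearm(NULL, ctx);

        if (err)
                pr_err("rearm failed (%d), source stays disarmed\n", err);
}
#endif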

/**
 * dpaa2_io_service_pull_fq() - pull dequeue frames from a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
                             struct dpaa2_io_store *s)
{
        struct qbman_pull_desc pd;
        int err;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
        qbman_pull_desc_set_numframes(&pd, (u8)s->max);
        qbman_pull_desc_set_fq(&pd, fqid);

        d = service_select(d);
        if (!d)
                return -ENODEV;
        s->swp = d->swp;
        err = qbman_swp_pull(d->swp, &pd);
        if (err)
                s->swp = NULL;

        return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
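
/*
 * Usage sketch, not part of the driver: issuing a pull dequeue. Our
 * assumption, based on the qbman portal code, is that -EBUSY signals an
 * outstanding VDQCR and is worth a brief retry; MY_FQID is hypothetical.
 */
#if 0	/* illustrative only */
static int my_pull(struct dpaa2_io_store *s)
{
        int err;

        do {
                err = dpaa2_io_service_pull_fq(NULL, MY_FQID, s);
        } while (err == -EBUSY);

        return err;
}
#endif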

/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
                                  struct dpaa2_io_store *s)
{
        struct qbman_pull_desc pd;
        int err;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
        qbman_pull_desc_set_numframes(&pd, (u8)s->max);
        qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

        d = service_select(d);
        if (!d)
                return -ENODEV;

        s->swp = d->swp;
        err = qbman_swp_pull(d->swp, &pd);
        if (err)
                s->swp = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
                                u32 fqid,
                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_fq(&ed, fqid);

        return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
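
/*
 * Usage sketch, not part of the driver: enqueue with a bounded retry on
 * -EBUSY (enqueue ring not ready), per the kernel-doc above. The retry
 * budget of 10 is an arbitrary illustration.
 */
#if 0	/* illustrative only */
static int my_xmit(u32 fqid, const struct dpaa2_fd *fd)
{
        int err, retries = 10;

        do {
                err = dpaa2_io_service_enqueue_fq(NULL, fqid, fd);
        } while (err == -EBUSY && --retries);

        return err;
}
#endif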

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
                                u32 qdid, u8 prio, u16 qdbin,
                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

        return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
                             u16 bpid,
                             const u64 *buffers,
                             unsigned int num_buffers)
{
        struct qbman_release_desc rd;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);

        return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this returns zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
                             u16 bpid,
                             u64 *buffers,
                             unsigned int num_buffers)
{
        unsigned long irqflags;
        int err;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
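
/*
 * Usage sketch, not part of the driver: draining a buffer pool on teardown.
 * The batch size of 7 mirrors the per-command release limit used by related
 * dpaa2 drivers (an assumption here); my_free_buf() is hypothetical.
 */
#if 0	/* illustrative only */
static void my_drain_pool(u16 bpid)
{
        u64 bufs[7];
        int i, n;

        /* acquire reports how many buffers it actually obtained */
        while ((n = dpaa2_io_service_acquire(NULL, bpid, bufs,
                                             ARRAY_SIZE(bufs))) > 0)
                for (i = 0; i < n; i++)
                        my_free_buf(bufs[i]);   /* unmap + free */
}
#endif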

/*
 * 'Stores' are reusable memory blocks that hold dequeue results and assist
 * with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue results.
 * @max_frames: the maximum number of dequeue results for frames; must be <= 16.
 * @dev:        the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
                                             struct device *dev)
{
        struct dpaa2_io_store *ret;
        size_t size;

        if (!max_frames || (max_frames > 16))
                return NULL;

        ret = kmalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->max = max_frames;
        size = max_frames * sizeof(struct dpaa2_dq) + 64;
        ret->alloced_addr = kzalloc(size, GFP_KERNEL);
        if (!ret->alloced_addr) {
                kfree(ret);
                return NULL;
        }

        ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
        ret->paddr = dma_map_single(dev, ret->vaddr,
                                    sizeof(struct dpaa2_dq) * max_frames,
                                    DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, ret->paddr)) {
                kfree(ret->alloced_addr);
                kfree(ret);
                return NULL;
        }

        ret->idx = 0;
        ret->dev = dev;

        return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
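
/*
 * Usage sketch, not part of the driver: one store per pull source, sized for
 * the largest burst. The helper name is illustrative; 16 is the cap enforced
 * by dpaa2_io_store_create() above.
 */
#if 0	/* illustrative only */
static struct dpaa2_io_store *my_alloc_store(struct device *dev)
{
        return dpaa2_io_store_create(16, dev);
}
#endif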

/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            results.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
        dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
                         DMA_FROM_DEVICE);
        kfree(s->alloced_addr);
        kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
        int match;
        struct dpaa2_dq *ret = &s->vaddr[s->idx];

        match = qbman_result_has_new_result(s->swp, ret);
        if (!match) {
                *is_last = 0;
                return NULL;
        }

        s->idx++;

        if (dpaa2_dq_is_pull_complete(ret)) {
                *is_last = 1;
                s->idx = 0;
                /*
                 * If we get an empty dequeue result to terminate a zero-results
                 * vdqcr, return NULL to the caller rather than expecting the
                 * caller to check non-NULL results every time.
                 */
                if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
                        ret = NULL;
        } else {
                prefetch(&s->vaddr[s->idx]);
                *is_last = 0;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
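
/*
 * Usage sketch, not part of the driver: the standard consumption loop after
 * a successful pull, patterned on how dpaa2 net drivers poll their stores.
 * process_fd() is hypothetical, and a real driver would bound the wait.
 */
#if 0	/* illustrative only */
static void my_consume(struct dpaa2_io_store *s)
{
        struct dpaa2_dq *dq;
        int is_last;

        do {
                /* NULL with !is_last means the result is not written yet */
                do {
                        dq = dpaa2_io_store_next(s, &is_last);
                } while (!dq && !is_last);

                if (dq)
                        process_fd(dpaa2_dq_fd(dq));
        } while (!is_last);
}
#endif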

/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame and byte counts are returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
                            u32 *fcnt, u32 *bcnt)
{
        struct qbman_fq_query_np_rslt state;
        struct qbman_swp *swp;
        unsigned long irqflags;
        int ret;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        swp = d->swp;
        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        ret = qbman_fq_query_state(swp, fqid, &state);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
        if (ret)
                return ret;
        *fcnt = qbman_fq_state_frame_count(&state);
        *bcnt = qbman_fq_state_byte_count(&state);

        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);

/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 * buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
        struct qbman_bp_query_rslt state;
        struct qbman_swp *swp;
        unsigned long irqflags;
        int ret;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        swp = d->swp;
        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        ret = qbman_bp_query(swp, bpid, &state);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
        if (ret)
                return ret;
        *num = qbman_bp_info_num_free_bufs(&state);
        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
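
/*
 * Usage sketch, not part of the driver: a debug dump built on the two query
 * helpers above. The function name and pr_info() lines are illustrative;
 * fqid/bpid come from the caller.
 */
#if 0	/* illustrative only */
static void my_dump_counts(u32 fqid, u16 bpid)
{
        u32 fcnt = 0, bcnt = 0, num = 0;

        if (!dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt))
                pr_info("fq %u: %u frames, %u bytes\n", fqid, fcnt, bcnt);
        if (!dpaa2_io_query_bp_count(NULL, bpid, &num))
                pr_info("bp %u: %u free buffers\n", bpid, num);
}
#endif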