linux/drivers/soc/fsl/dpio/dpio-service.c
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2019 NXP
 *
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
        struct dpaa2_io_desc dpio_desc;
        struct qbman_swp_desc swp_desc;
        struct qbman_swp *swp;
        struct list_head node;
        /* protect against multiple management commands */
        spinlock_t lock_mgmt_cmd;
        /* protect notifications list */
        spinlock_t lock_notifications;
        struct list_head notifications;
        struct device *dev;
};

struct dpaa2_io_store {
        unsigned int max;
        dma_addr_t paddr;
        struct dpaa2_dq *vaddr;
        void *alloced_addr;    /* unaligned value from kmalloc() */
        unsigned int idx;      /* position of the next-to-be-returned entry */
        struct qbman_swp *swp; /* portal used to issue VDQCR */
        struct device *dev;    /* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
                                                     int cpu)
{
        if (d)
                return d;

        if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
                return NULL;

        /*
         * If cpu == -1, choose the current cpu, with no guarantees about
         * potentially being migrated away.
         */
        if (cpu < 0)
                cpu = smp_processor_id();

        /* If a specific cpu was requested, pick it up immediately */
        return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
        if (d)
                return d;

        d = service_select_by_cpu(d, -1);
        if (d)
                return d;

        spin_lock(&dpio_list_lock);
        d = list_entry(dpio_list.next, struct dpaa2_io, node);
        list_del(&d->node);
        list_add_tail(&d->node, &dpio_list);
        spin_unlock(&dpio_list_lock);

        return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
        if (cpu == DPAA2_IO_ANY_CPU)
                return service_select(NULL);

        return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
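
/*
 * Illustrative usage sketch (not part of the original driver): a consumer
 * would typically ask for the service affine to the cpu it runs on and fall
 * back to any available service. The function name below is hypothetical,
 * and the caller is assumed to run in a context where cpu migration is
 * either disabled or acceptable.
 */
static struct dpaa2_io * __maybe_unused example_get_io_service(void)
{
        struct dpaa2_io *io;

        /* prefer the DPIO portal affine to the current cpu, if any */
        io = dpaa2_io_service_select(smp_processor_id());
        if (!io)
                /* otherwise take the next service in round-robin order */
                io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);

        return io;
}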

/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
                                 struct device *dev)
{
        struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;

        /* check if CPU is out of range (-1 means any cpu) */
        if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
                kfree(obj);
                return NULL;
        }

        obj->dpio_desc = *desc;
        obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
        obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
        obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
        obj->swp = qbman_swp_init(&obj->swp_desc);

        if (!obj->swp) {
                kfree(obj);
                return NULL;
        }

        INIT_LIST_HEAD(&obj->node);
        spin_lock_init(&obj->lock_mgmt_cmd);
        spin_lock_init(&obj->lock_notifications);
        INIT_LIST_HEAD(&obj->notifications);

        /* For now only enable DQRR interrupts */
        qbman_swp_interrupt_set_trigger(obj->swp,
                                        QBMAN_SWP_INTERRUPT_DQRI);
        qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
        if (obj->dpio_desc.receives_notifications)
                qbman_swp_push_set(obj->swp, 0, 1);

        spin_lock(&dpio_list_lock);
        list_add_tail(&obj->node, &dpio_list);
        if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
                dpio_by_cpu[desc->cpu] = obj;
        spin_unlock(&dpio_list_lock);

        obj->dev = dev;

        return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
        spin_lock(&dpio_list_lock);
        dpio_by_cpu[d->dpio_desc.cpu] = NULL;
        list_del(&d->node);
        spin_unlock(&dpio_list_lock);

        kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
        const struct dpaa2_dq *dq;
        int max = 0;
        struct qbman_swp *swp;
        u32 status;

        swp = obj->swp;
        status = qbman_swp_interrupt_read_status(swp);
        if (!status)
                return IRQ_NONE;

        dq = qbman_swp_dqrr_next(swp);
        while (dq) {
                if (qbman_result_is_SCN(dq)) {
                        struct dpaa2_io_notification_ctx *ctx;
                        u64 q64;

                        q64 = qbman_result_SCN_ctx(dq);
                        ctx = (void *)(uintptr_t)q64;
                        ctx->cb(ctx);
                } else {
                        pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
                }
                qbman_swp_dqrr_consume(swp, dq);
                ++max;
                if (max > DPAA_POLL_MAX)
                        goto done;
                dq = qbman_swp_dqrr_next(swp);
        }
done:
        qbman_swp_interrupt_clear_status(swp, status);
        qbman_swp_interrupt_set_inhibit(swp, 0);
        return IRQ_HANDLED;
}
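
/*
 * Illustrative sketch (not part of the original driver): dpaa2_io_irq() is
 * meant to be called from the DPIO device's interrupt handler, roughly as
 * the dpio platform driver does. The handler name is hypothetical, and the
 * dev_id cookie is assumed here to be the dpaa2_io object that was passed
 * to request_irq().
 */
static irqreturn_t __maybe_unused example_dpio_irq_handler(int irq, void *dev_id)
{
        struct dpaa2_io *io = dev_id;

        /*
         * Drains pending DQRR entries (bounded by DPAA_POLL_MAX), clears the
         * interrupt status and un-inhibits the portal interrupt.
         */
        return dpaa2_io_irq(io);
}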

/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 *
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
        return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d:   the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the registration.
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully.  In that way:
 *    (a) The DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller)
 *    (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *        'qman64' parameters that it should pass along in the MC command
 *        in order for the object to be configured to produce the right
 *        notification fields to the DPIO service.
 *
 * Return 0 for success, -ENODEV if no service is available, or another
 * negative error code on failure.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
                              struct dpaa2_io_notification_ctx *ctx,
                              struct device *dev)
{
        struct device_link *link;
        unsigned long irqflags;

        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (!d)
                return -ENODEV;

        link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
                return -EINVAL;

        ctx->dpio_id = d->dpio_desc.dpio_id;
        ctx->qman64 = (u64)(uintptr_t)ctx;
        ctx->dpio_private = d;
        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_add(&ctx->node, &d->notifications);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);

        /* Enable the generation of CDAN notifications */
        if (ctx->is_cdan)
                return qbman_swp_CDAN_set_context_enable(d->swp,
                                                         (u16)ctx->id,
                                                         ctx->qman64);
        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
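
/*
 * Illustrative sketch (not part of the original driver): registering a CDAN
 * notification context before issuing the MC "attach" command, the way a
 * consumer such as an Ethernet driver would. All example_* names and the
 * channel id are hypothetical; the callback signature follows the way
 * dpaa2_io_irq() above invokes ctx->cb(ctx). Note that the context must
 * stay alive for as long as it is registered (it is linked into the
 * service's notification list), hence the static storage here.
 */
static void example_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
        /* typically: schedule NAPI or a workqueue to drain the channel */
}

static int __maybe_unused example_register_cdan(struct device *dev,
                                                u32 channel_id, int cpu)
{
        static struct dpaa2_io_notification_ctx ctx;
        int err;

        ctx.is_cdan = 1;
        ctx.id = channel_id;
        ctx.desired_cpu = cpu;
        ctx.cb = example_cdan_cb;

        err = dpaa2_io_service_register(NULL, &ctx, dev);
        if (err)
                return err;

        /*
         * ctx.dpio_id and ctx.qman64 are now filled in; pass them along in
         * the MC "attach" command (e.g. a dpcon_set_notification() call in
         * the dpaa2-eth case) so the object produces matching notifications.
         */
        return 0;
}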

/**
 * dpaa2_io_service_deregister() - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
                                 struct dpaa2_io_notification_ctx *ctx,
                                 struct device *dev)
{
        struct dpaa2_io *d = ctx->dpio_private;
        unsigned long irqflags;

        if (ctx->is_cdan)
                qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_del(&ctx->node);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed"; i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN, which is
 * what this function achieves.
 *
 * Return 0 for success, or -ENODEV if no DPIO service is available.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
                           struct dpaa2_io_notification_ctx *ctx)
{
        unsigned long irqflags;
        int err;

        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (unlikely(!d))
                return -ENODEV;

        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        if (ctx->is_cdan)
                err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
        else
                err = qbman_swp_fq_schedule(d->swp, ctx->id);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
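
/*
 * Illustrative sketch (not part of the original driver): the usual
 * notification flow. After the registered callback fires, the source stays
 * disarmed; once the consumer has drained it (e.g. at the end of a NAPI
 * poll), it rearms the source so a new CDAN/FQDAN can be generated. The
 * function name is hypothetical.
 */
static void __maybe_unused example_handle_cdan(struct dpaa2_io_notification_ctx *ctx)
{
        /*
         * Pull-dequeue and process frames here while the source is
         * disarmed (see the store example further below).
         */

        /* then allow the hardware to generate the next notification */
        if (dpaa2_io_service_rearm(NULL, ctx))
                pr_err("example: failed to rearm notifications\n");
}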

/**
 * dpaa2_io_service_pull_fq() - pull dequeue frames from a fq.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
                             struct dpaa2_io_store *s)
{
        struct qbman_pull_desc pd;
        int err;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
        qbman_pull_desc_set_numframes(&pd, (u8)s->max);
        qbman_pull_desc_set_fq(&pd, fqid);

        d = service_select(d);
        if (!d)
                return -ENODEV;

        s->swp = d->swp;
        err = qbman_swp_pull(d->swp, &pd);
        if (err)
                s->swp = NULL;

        return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);

/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
                                  struct dpaa2_io_store *s)
{
        struct qbman_pull_desc pd;
        int err;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
        qbman_pull_desc_set_numframes(&pd, (u8)s->max);
        qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

        d = service_select(d);
        if (!d)
                return -ENODEV;

        s->swp = d->swp;
        err = qbman_swp_pull(d->swp, &pd);
        if (err)
                s->swp = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
                                u32 fqid,
                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_fq(&ed, fqid);

        return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
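
/*
 * Illustrative sketch (not part of the original driver): the enqueue ring
 * can be momentarily full, in which case -EBUSY is returned and the caller
 * is expected to retry, as dpaa2 consumers typically do. The function name
 * and the retry bound are example values.
 */
static int __maybe_unused example_enqueue(u32 fqid, const struct dpaa2_fd *fd)
{
        int retries = 10;
        int err;

        do {
                err = dpaa2_io_service_enqueue_fq(NULL, fqid, fd);
        } while (err == -EBUSY && --retries);

        return err;
}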

/**
 * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
 * to a frame queue using one fqid.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptors which are enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return the number of frames enqueued (which may be fewer than @nb), or
 * -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
                                u32 fqid,
                                const struct dpaa2_fd *fd,
                                int nb)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_fq(&ed, fqid);

        return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);

/**
 * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queues using a list of fqids.
 * @d: the given DPIO service.
 * @fqid: the given list of frame queue ids.
 * @fd: the frame descriptors which are enqueued.
 * @nb: number of frames to be enqueued, must be <= 32.
 *
 * Return the number of frames enqueued (which may be fewer than @nb),
 * -ENOMEM if the descriptor array cannot be allocated, or -ENODEV if there
 * is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
                                u32 *fqid,
                                const struct dpaa2_fd *fd,
                                int nb)
{
        struct qbman_eq_desc *ed;
        int i, ret;

        /* the scratch descriptor array bounds @nb to 32 entries */
        ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
        if (!ed)
                return -ENOMEM;

        d = service_select(d);
        if (!d) {
                ret = -ENODEV;
                goto out;
        }

        for (i = 0; i < nb; i++) {
                qbman_eq_desc_clear(&ed[i]);
                qbman_eq_desc_set_no_orp(&ed[i], 0);
                qbman_eq_desc_set_fq(&ed[i], fqid[i]);
        }

        ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
out:
        kfree(ed);
        return ret;
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
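
/*
 * Illustrative sketch (not part of the original driver): batch enqueue to a
 * set of frame queues, resubmitting the tail when the portal accepts only
 * part of the batch. Assumes the count-return semantics documented above
 * and @nb <= 32; all names are hypothetical.
 */
static int __maybe_unused example_enqueue_batch(u32 *fqids,
                                                const struct dpaa2_fd *fds,
                                                int nb)
{
        int done = 0;
        int ret;

        while (done < nb) {
                ret = dpaa2_io_service_enqueue_multiple_desc_fq(NULL,
                                fqids + done, fds + done, nb - done);
                if (ret < 0)
                        return ret;     /* e.g. -ENODEV or -ENOMEM */
                if (!ret)
                        break;          /* ring full; retry later */
                done += ret;
        }

        return done;
}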

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
                                u32 qdid, u8 prio, u16 qdbin,
                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

        return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
                             u16 bpid,
                             const u64 *buffers,
                             unsigned int num_buffers)
{
        struct qbman_release_desc rd;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);

        return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
                             u16 bpid,
                             u64 *buffers,
                             unsigned int num_buffers)
{
        unsigned long irqflags;
        int err;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
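
/*
 * Illustrative sketch (not part of the original driver): draining a buffer
 * pool, e.g. on teardown. Acquire returns fewer buffers than requested when
 * the pool runs low and zero once it is empty. The 7-buffer burst mirrors
 * what dpaa2 consumers commonly use, but is an assumption here, as is the
 * function name.
 */
static void __maybe_unused example_drain_pool(u16 bpid)
{
        u64 buffers[7];
        int ret;

        do {
                ret = dpaa2_io_service_acquire(NULL, bpid, buffers,
                                               ARRAY_SIZE(buffers));
                if (ret < 0)
                        return;
                /* dma-unmap and free the ret returned addresses here */
        } while (ret);
}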

/*
 * 'Stores' are reusable memory blocks for holding dequeue results, and to
 * assist with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue results.
 * @max_frames: the maximum number of dequeue results, must be <= 32.
 * @dev:        the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
                                             struct device *dev)
{
        struct dpaa2_io_store *ret;
        size_t size;

        if (!max_frames || (max_frames > 32))
                return NULL;

        ret = kmalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->max = max_frames;
        /* overallocate so the storage can be aligned to 64 bytes */
        size = max_frames * sizeof(struct dpaa2_dq) + 64;
        ret->alloced_addr = kzalloc(size, GFP_KERNEL);
        if (!ret->alloced_addr) {
                kfree(ret);
                return NULL;
        }

        ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
        ret->paddr = dma_map_single(dev, ret->vaddr,
                                    sizeof(struct dpaa2_dq) * max_frames,
                                    DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, ret->paddr)) {
                kfree(ret->alloced_addr);
                kfree(ret);
                return NULL;
        }

        ret->idx = 0;
        ret->dev = dev;

        return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            results.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
        dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
                         DMA_FROM_DEVICE);
        kfree(s->alloced_addr);
        kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
        int match;
        struct dpaa2_dq *ret = &s->vaddr[s->idx];

        match = qbman_result_has_new_result(s->swp, ret);
        if (!match) {
                *is_last = 0;
                return NULL;
        }

        s->idx++;

        if (dpaa2_dq_is_pull_complete(ret)) {
                *is_last = 1;
                s->idx = 0;
                /*
                 * If we get an empty dequeue result to terminate a zero-results
                 * vdqcr, return NULL to the caller rather than expecting them
                 * to check non-NULL results every time.
                 */
                if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
                        ret = NULL;
        } else {
                prefetch(&s->vaddr[s->idx]);
                *is_last = 0;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
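
/*
 * Illustrative sketch (not part of the original driver): the canonical
 * pull-dequeue loop, combining dpaa2_io_store_create(),
 * dpaa2_io_service_pull_fq() and dpaa2_io_store_next(). This mirrors the
 * pattern used by dpaa2 consumers; dpaa2_dq_fd() mentioned in the comment
 * is the frame-descriptor accessor from <soc/fsl/dpaa2-global.h>, and all
 * example_* names are hypothetical. A real driver would bound the wait.
 */
static int __maybe_unused example_pull_and_process(struct device *dev, u32 fqid)
{
        struct dpaa2_io_store *store;
        const struct dpaa2_dq *dq;
        int is_last = 0;
        int err;

        store = dpaa2_io_store_create(16, dev);
        if (!store)
                return -ENOMEM;

        err = dpaa2_io_service_pull_fq(NULL, fqid, store);
        if (err)
                goto out;

        /* wait for the results to land in the store, then walk them */
        do {
                dq = dpaa2_io_store_next(store, &is_last);
                if (!dq)
                        continue; /* still waiting, or empty final result */

                /* process dpaa2_dq_fd(dq) here */
        } while (!is_last);

out:
        dpaa2_io_store_destroy(store);
        return err;
}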

/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of the frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame and byte counts are returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
                            u32 *fcnt, u32 *bcnt)
{
        struct qbman_fq_query_np_rslt state;
        struct qbman_swp *swp;
        unsigned long irqflags;
        int ret;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        swp = d->swp;
        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        ret = qbman_fq_query_state(swp, fqid, &state);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
        if (ret)
                return ret;

        *fcnt = qbman_fq_state_frame_count(&state);
        *bcnt = qbman_fq_state_byte_count(&state);

        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
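
/*
 * Illustrative sketch (not part of the original driver): dumping the
 * instantaneous backlog of a frame queue for debugging. The function name
 * is hypothetical.
 */
static void __maybe_unused example_dump_fq_backlog(u32 fqid)
{
        u32 fcnt = 0, bcnt = 0;

        if (!dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt))
                pr_info("fq 0x%x: %u frames, %u bytes pending\n",
                        fqid, fcnt, bcnt);
}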

/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 *                             buffer pool.
 * @d: the given DPIO object.
 * @bpid: the id of the buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
        struct qbman_bp_query_rslt state;
        struct qbman_swp *swp;
        unsigned long irqflags;
        int ret;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        swp = d->swp;
        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        ret = qbman_bp_query(swp, bpid, &state);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
        if (ret)
                return ret;

        *num = qbman_bp_info_num_free_bufs(&state);

        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);