linux/drivers/soc/ti/knav_qmss_queue.c
/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:     Sandeep Nair <sandeep_n@ti.com>
 *              Cyril Chemparathy <cyril@ti.com>
 *              Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX       0
#define KNAV_QUEUE_STATUS_REG_INDEX     1
#define KNAV_QUEUE_CONFIG_REG_INDEX     2
#define KNAV_QUEUE_REGION_REG_INDEX     3
#define KNAV_QUEUE_PUSH_REG_INDEX       4
#define KNAV_QUEUE_POP_REG_INDEX        5

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX  0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX  1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX  2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX   3

#define knav_queue_idx_to_inst(kdev, idx)                       \
        (kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)                   \
        list_for_each_entry_rcu(qh, &inst->handles, list)

#define for_each_instance(idx, inst, kdev)              \
        for (idx = 0, inst = kdev->instances;           \
             idx < (kdev)->num_queues_in_use;                   \
             idx++, inst = knav_queue_idx_to_inst(kdev, idx))
/* All firmware file names end up here. List the firmware file names below.
 * Newest followed by older ones. Search is done from the start of the array
 * until a firmware file is found.
 */
const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};

/**
 * knav_queue_notify()  - qmss queue notifier call
 *
 * @inst                - qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
        struct knav_queue *qh;

        if (!inst)
                return;

        rcu_read_lock();
        for_each_handle_rcu(qh, inst) {
                if (atomic_read(&qh->notifier_enabled) <= 0)
                        continue;
                if (WARN_ON(!qh->notifier_fn))
                        continue;
                atomic_inc(&qh->stats.notifies);
                qh->notifier_fn(qh->notifier_fn_arg);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);
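
/*
 * Illustrative client-side sketch (not part of this driver): the
 * notifier_fn callbacks invoked above are registered by users of the
 * queue API. The names my_rx_notify/my_priv are hypothetical; a client
 * would typically mask further notifications (here assumed via the
 * knav_queue_disable_notify() helper from <linux/soc/ti/knav_qmss.h>)
 * and defer the real work:
 *
 *	static void my_rx_notify(void *arg)
 *	{
 *		struct my_priv *priv = arg;
 *
 *		knav_queue_disable_notify(priv->rx_queue);
 *		napi_schedule(&priv->napi);
 *	}
 */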

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
        struct knav_queue_inst *inst = _instdata;

        knav_queue_notify(inst);
        return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
                          struct knav_queue_inst *inst)
{
        unsigned queue = inst->id - range->queue_base;
        unsigned long cpu_map;
        int ret = 0, irq;

        if (range->flags & RANGE_HAS_IRQ) {
                irq = range->irqs[queue].irq;
                cpu_map = range->irqs[queue].cpu_map;
                ret = request_irq(irq, knav_queue_int_handler, 0,
                                        inst->irq_name, inst);
                if (ret)
                        return ret;
                disable_irq(irq);
                if (cpu_map) {
                        ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
                        if (ret) {
                                dev_warn(range->kdev->dev,
                                         "Failed to set IRQ affinity\n");
                                return ret;
                        }
                }
        }
        return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
        struct knav_range_info *range = inst->range;
        unsigned queue = inst->id - inst->range->queue_base;
        int irq;

        if (range->flags & RANGE_HAS_IRQ) {
                irq = range->irqs[queue].irq;
                irq_set_affinity_hint(irq, NULL);
                free_irq(irq, inst);
        }
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
        return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
        return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
        struct knav_queue *tmp;

        rcu_read_lock();
        for_each_handle_rcu(tmp, inst) {
                if (tmp->flags & KNAV_QUEUE_SHARED) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();
        return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
                                                unsigned type)
{
        if ((type == KNAV_QUEUE_QPEND) &&
            (inst->range->flags & RANGE_HAS_IRQ)) {
                return true;
        } else if ((type == KNAV_QUEUE_ACC) &&
                (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
                return true;
        } else if ((type == KNAV_QUEUE_GP) &&
                !(inst->range->flags &
                        (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
                return true;
        }
        return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
        struct knav_queue_inst *inst;
        int idx;

        for_each_instance(idx, inst, kdev) {
                if (inst->id == id)
                        return inst;
        }
        return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
        if (kdev->base_id <= id &&
            kdev->base_id + kdev->num_queues > id) {
                id -= kdev->base_id;
                return knav_queue_match_id_to_inst(kdev, id);
        }
        return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
                                      const char *name, unsigned flags)
{
        struct knav_queue *qh;
        unsigned id;
        int ret = 0;

        qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
        if (!qh)
                return ERR_PTR(-ENOMEM);

        qh->flags = flags;
        qh->inst = inst;
        id = inst->id - inst->qmgr->start_queue;
        qh->reg_push = &inst->qmgr->reg_push[id];
        qh->reg_pop = &inst->qmgr->reg_pop[id];
        qh->reg_peek = &inst->qmgr->reg_peek[id];

        /* first opener? */
        if (!knav_queue_is_busy(inst)) {
                struct knav_range_info *range = inst->range;

                inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
                if (range->ops && range->ops->open_queue)
                        ret = range->ops->open_queue(range, inst, flags);

                if (ret) {
                        devm_kfree(inst->kdev->dev, qh);
                        return ERR_PTR(ret);
                }
        }
        list_add_tail_rcu(&qh->list, &inst->handles);
        return qh;
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
        struct knav_queue_inst *inst;
        struct knav_queue *qh;

        mutex_lock(&knav_dev_lock);

        qh = ERR_PTR(-ENODEV);
        inst = knav_queue_find_by_id(id);
        if (!inst)
                goto unlock_ret;

        qh = ERR_PTR(-EEXIST);
        if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
                goto unlock_ret;

        qh = ERR_PTR(-EBUSY);
        if ((flags & KNAV_QUEUE_SHARED) &&
            (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
                goto unlock_ret;

        qh = __knav_queue_open(inst, name, flags);

unlock_ret:
        mutex_unlock(&knav_dev_lock);

        return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
                                                unsigned type, unsigned flags)
{
        struct knav_queue_inst *inst;
        struct knav_queue *qh = ERR_PTR(-EINVAL);
        int idx;

        mutex_lock(&knav_dev_lock);

        for_each_instance(idx, inst, kdev) {
                if (knav_queue_is_reserved(inst))
                        continue;
                if (!knav_queue_match_type(inst, type))
                        continue;
                if (knav_queue_is_busy(inst))
                        continue;
                qh = __knav_queue_open(inst, name, flags);
                goto unlock_ret;
        }

unlock_ret:
        mutex_unlock(&knav_dev_lock);
        return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
        struct knav_range_info *range = inst->range;

        if (range->ops && range->ops->set_notify)
                range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
        struct knav_queue_inst *inst = qh->inst;
        bool first;

        if (WARN_ON(!qh->notifier_fn))
                return -EINVAL;

        /* Adjust the per handle notifier count */
        first = (atomic_inc_return(&qh->notifier_enabled) == 1);
        if (!first)
                return 0; /* nothing to do */

        /* Now adjust the per instance notifier count */
        first = (atomic_inc_return(&inst->num_notifiers) == 1);
        if (first)
                knav_queue_set_notify(inst, true);

        return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
        struct knav_queue_inst *inst = qh->inst;
        bool last;

        last = (atomic_dec_return(&qh->notifier_enabled) == 0);
        if (!last)
                return 0; /* nothing to do */

        last = (atomic_dec_return(&inst->num_notifiers) == 0);
        if (last)
                knav_queue_set_notify(inst, false);

        return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
                                struct knav_queue_notify_config *cfg)
{
        knav_queue_notify_fn old_fn = qh->notifier_fn;

        if (!cfg)
                return -EINVAL;

        if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
                return -ENOTSUPP;

        if (!cfg->fn && old_fn)
                knav_queue_disable_notifier(qh);

        qh->notifier_fn = cfg->fn;
        qh->notifier_fn_arg = cfg->fn_arg;

        if (cfg->fn && !old_fn)
                knav_queue_enable_notifier(qh);

        return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
                               struct knav_queue_inst *inst,
                               bool enabled)
{
        unsigned queue;

        if (range->flags & RANGE_HAS_IRQ) {
                queue = inst->id - range->queue_base;
                if (enabled)
                        enable_irq(range->irqs[queue].irq);
                else
                        disable_irq_nosync(range->irqs[queue].irq);
        }
        return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
                                struct knav_queue_inst *inst, unsigned flags)
{
        return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
                                struct knav_queue_inst *inst)
{
        knav_queue_free_irq(inst);
        return 0;
}

struct knav_range_ops knav_gp_range_ops = {
        .set_notify     = knav_gp_set_notify,
        .open_queue     = knav_gp_open_queue,
        .close_queue    = knav_gp_close_queue,
};


static int knav_queue_get_count(void *qhandle)
{
        struct knav_queue *qh = qhandle;
        struct knav_queue_inst *inst = qh->inst;

        return readl_relaxed(&qh->reg_peek[0].entry_count) +
                atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
                                        struct knav_queue_inst *inst)
{
        struct knav_device *kdev = inst->kdev;
        struct knav_queue *qh;

        if (!knav_queue_is_busy(inst))
                return;

        seq_printf(s, "\tqueue id %d (%s)\n",
                   kdev->base_id + inst->id, inst->name);
        for_each_handle_rcu(qh, inst) {
                seq_printf(s, "\t\thandle %p: ", qh);
                seq_printf(s, "pushes %8d, ",
                           atomic_read(&qh->stats.pushes));
                seq_printf(s, "pops %8d, ",
                           atomic_read(&qh->stats.pops));
                seq_printf(s, "count %8d, ",
                           knav_queue_get_count(qh));
                seq_printf(s, "notifies %8d, ",
                           atomic_read(&qh->stats.notifies));
                seq_printf(s, "push errors %8d, ",
                           atomic_read(&qh->stats.push_errors));
                seq_printf(s, "pop errors %8d\n",
                           atomic_read(&qh->stats.pop_errors));
        }
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
        struct knav_queue_inst *inst;
        int idx;

        mutex_lock(&knav_dev_lock);
        seq_printf(s, "%s: %u-%u\n",
                   dev_name(kdev->dev), kdev->base_id,
                   kdev->base_id + kdev->num_queues - 1);
        for_each_instance(idx, inst, kdev)
                knav_queue_debug_show_instance(s, inst);
        mutex_unlock(&knav_dev_lock);

        return 0;
}

static int knav_queue_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, knav_queue_debug_show, NULL);
}

static const struct file_operations knav_queue_debug_ops = {
        .open           = knav_queue_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static inline int knav_queue_pdsp_wait(u32 __iomem *addr, unsigned timeout,
                                        u32 flags)
{
        unsigned long end;
        u32 val = 0;

        end = jiffies + msecs_to_jiffies(timeout);
        while (time_after(end, jiffies)) {
                val = readl_relaxed(addr);
                if (flags)
                        val &= flags;
                if (!val)
                        break;
                cpu_relax();
        }
        return val ? -ETIMEDOUT : 0;
}


static int knav_queue_flush(struct knav_queue *qh)
{
        struct knav_queue_inst *inst = qh->inst;
        unsigned id = inst->id - inst->qmgr->start_queue;

        atomic_set(&inst->desc_count, 0);
        writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
        return 0;
}

/**
 * knav_queue_open()    - open a hardware queue
 * @name                - name to give the queue handle
 * @id                  - desired queue number, if any, or specifies the
 *                        type of queue
 * @flags               - the following flags are applicable to queues:
 *      KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *                           exclusive by default.
 *                           Subsequent attempts to open a shared queue should
 *                           also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
                                        unsigned flags)
{
        struct knav_queue *qh = ERR_PTR(-EINVAL);

        switch (id) {
        case KNAV_QUEUE_QPEND:
        case KNAV_QUEUE_ACC:
        case KNAV_QUEUE_GP:
                qh = knav_queue_open_by_type(name, id, flags);
                break;

        default:
                qh = knav_queue_open_by_id(name, id, flags);
                break;
        }
        return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);
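
/*
 * Usage sketch (illustrative; the queue name is hypothetical client
 * code). The type constants come from <linux/soc/ti/knav_qmss.h>, and
 * errors are returned as ERR_PTR() values per the note above:
 *
 *	void *q;
 *
 *	q = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	...
 *	knav_queue_close(q);
 */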

/**
 * knav_queue_close()   - close a hardware queue handle
 * @qh                  - handle to close
 */
void knav_queue_close(void *qhandle)
{
        struct knav_queue *qh = qhandle;
        struct knav_queue_inst *inst = qh->inst;

        while (atomic_read(&qh->notifier_enabled) > 0)
                knav_queue_disable_notifier(qh);

        mutex_lock(&knav_dev_lock);
        list_del_rcu(&qh->list);
        mutex_unlock(&knav_dev_lock);
        synchronize_rcu();
        if (!knav_queue_is_busy(inst)) {
                struct knav_range_info *range = inst->range;

                if (range->ops && range->ops->close_queue)
                        range->ops->close_queue(range, inst);
        }
        devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

/**
 * knav_queue_device_control()  - Perform control operations on a queue
 * @qh                          - queue handle
 * @cmd                         - control commands
 * @arg                         - command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
                                unsigned long arg)
{
        struct knav_queue *qh = qhandle;
        struct knav_queue_notify_config *cfg;
        int ret;

        switch ((int)cmd) {
        case KNAV_QUEUE_GET_ID:
                ret = qh->inst->kdev->base_id + qh->inst->id;
                break;

        case KNAV_QUEUE_FLUSH:
                ret = knav_queue_flush(qh);
                break;

        case KNAV_QUEUE_SET_NOTIFIER:
                cfg = (void *)arg;
                ret = knav_queue_set_notifier(qh, cfg);
                break;

        case KNAV_QUEUE_ENABLE_NOTIFY:
                ret = knav_queue_enable_notifier(qh);
                break;

        case KNAV_QUEUE_DISABLE_NOTIFY:
                ret = knav_queue_disable_notifier(qh);
                break;

        case KNAV_QUEUE_GET_COUNT:
                ret = knav_queue_get_count(qh);
                break;

        default:
                ret = -ENOTSUPP;
                break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);
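
/*
 * Illustrative sketch of the notifier path (my_rx_notify and priv are
 * hypothetical client names):
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_rx_notify,
 *		.fn_arg	= priv,
 *	};
 *
 *	ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 *
 * Setting a non-NULL fn implicitly enables notification through
 * knav_queue_set_notifier() above; clearing it disables it again.
 */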



/**
 * knav_queue_push()    - push data (or descriptor) to the tail of a queue
 * @qh                  - hardware queue handle
 * @dma                 - DMA address of the data to push
 * @size                - size of data to push
 * @flags               - can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
                                        unsigned size, unsigned flags)
{
        struct knav_queue *qh = qhandle;
        u32 val;

        val = (u32)dma | ((size / 16) - 1);
        writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

        atomic_inc(&qh->stats.pushes);
        return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);
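
/*
 * The pushed word packs the 32-bit descriptor address with the size
 * encoded in 16-byte units minus one. For example, a 64-byte
 * descriptor at "dma" is written as (dma | 3):
 *
 *	knav_queue_push(qh, dma, 64, 0);
 */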

/**
 * knav_queue_pop()     - pop data (or descriptor) from the head of a queue
 * @qh                  - hardware queue handle
 * @size                - (optional) size of the data popped.
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
        struct knav_queue *qh = qhandle;
        struct knav_queue_inst *inst = qh->inst;
        dma_addr_t dma;
        u32 val, idx;

        /* are we accumulated? */
        if (inst->descs) {
                if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
                        atomic_inc(&inst->desc_count);
                        return 0;
                }
                idx  = atomic_inc_return(&inst->desc_head);
                idx &= ACC_DESCS_MASK;
                val = inst->descs[idx];
        } else {
                val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
                if (unlikely(!val))
                        return 0;
        }

        dma = val & DESC_PTR_MASK;
        if (size)
                *size = ((val & DESC_SIZE_MASK) + 1) * 16;

        atomic_inc(&qh->stats.pops);
        return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);
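
/*
 * Illustrative drain loop (process_desc() is a hypothetical consumer);
 * a zero return signals an empty queue, which kdesc_empty_pool() below
 * also relies on:
 *
 *	dma_addr_t dma;
 *	unsigned size;
 *
 *	while ((dma = knav_queue_pop(qh, &size)))
 *		process_desc(dma, size);
 */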

/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
        struct knav_region *region;
        int i;

        region = pool->region;
        pool->desc_size = region->desc_size;
        for (i = 0; i < pool->num_desc; i++) {
                int index = pool->region_offset + i;
                dma_addr_t dma_addr;
                unsigned dma_size;
                dma_addr = region->dma_start + (region->desc_size * index);
                dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
                dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
                                           DMA_TO_DEVICE);
                knav_queue_push(pool->queue, dma_addr, dma_size, 0);
        }
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
        dma_addr_t dma;
        unsigned size;
        void *desc;
        int i;

        if (!pool->queue)
                return;

        for (i = 0;; i++) {
                dma = knav_queue_pop(pool->queue, &size);
                if (!dma)
                        break;
                desc = knav_pool_desc_dma_to_virt(pool, dma);
                if (!desc) {
                        dev_dbg(pool->kdev->dev,
                                "couldn't unmap desc, continuing\n");
                        continue;
                }
        }
        WARN_ON(i != pool->num_desc);
        knav_queue_close(pool->queue);
}


/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
        struct knav_pool *pool = ph;
        return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
        struct knav_pool *pool = ph;
        return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
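
/*
 * Both translations above are plain offset arithmetic, which is valid
 * because each region is a single physically contiguous allocation: a
 * descriptor at virt_start + n * desc_size always maps to
 * dma_start + n * desc_size, and back.
 */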

/**
 * knav_pool_create()   - Create a pool of descriptors
 * @name                - name to give the pool handle
 * @num_desc            - number of descriptors in the pool
 * @region_id           - QMSS region id from which the descriptors are to be
 *                        allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
                                        int num_desc, int region_id)
{
        struct knav_region *reg_itr, *region = NULL;
        struct knav_pool *pool, *pi;
        struct list_head *node;
        unsigned last_offset;
        bool slot_found;
        int ret;

        if (!kdev->dev)
                return ERR_PTR(-ENODEV);

        pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
        if (!pool) {
                dev_err(kdev->dev, "out of memory allocating pool\n");
                return ERR_PTR(-ENOMEM);
        }

        for_each_region(kdev, reg_itr) {
                if (reg_itr->id != region_id)
                        continue;
                region = reg_itr;
                break;
        }

        if (!region) {
                dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
                ret = -EINVAL;
                goto err;
        }

        pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
        if (IS_ERR_OR_NULL(pool->queue)) {
                dev_err(kdev->dev,
                        "failed to open queue for pool(%s), error %ld\n",
                        name, PTR_ERR(pool->queue));
                ret = PTR_ERR(pool->queue);
                goto err;
        }

        pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
        pool->kdev = kdev;
        pool->dev = kdev->dev;

        mutex_lock(&knav_dev_lock);

        if (num_desc > (region->num_desc - region->used_desc)) {
                dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
                        region_id, name);
                ret = -ENOMEM;
                goto err_unlock;
        }

        /* Region maintains a sorted (by region offset) list of pools;
         * use the first free slot which is large enough to accommodate
         * the request
         */
        last_offset = 0;
        slot_found = false;
        node = &region->pools;
        list_for_each_entry(pi, &region->pools, region_inst) {
                if ((pi->region_offset - last_offset) >= num_desc) {
                        slot_found = true;
                        break;
                }
                last_offset = pi->region_offset + pi->num_desc;
        }
        node = &pi->region_inst;

        if (slot_found) {
                pool->region = region;
                pool->num_desc = num_desc;
                pool->region_offset = last_offset;
                region->used_desc += num_desc;
                list_add_tail(&pool->list, &kdev->pools);
                list_add_tail(&pool->region_inst, node);
        } else {
                dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
                        name, region_id);
                ret = -ENOMEM;
                goto err_unlock;
        }

        mutex_unlock(&knav_dev_lock);
        kdesc_fill_pool(pool);
        return pool;

err_unlock:
        mutex_unlock(&knav_dev_lock);
err:
        kfree(pool->name);
        devm_kfree(kdev->dev, pool);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);
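
/*
 * Illustrative sketch (the pool name and region id are hypothetical):
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("my-tx-pool", 512, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 *	...
 *	knav_pool_destroy(pool);
 */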

/**
 * knav_pool_destroy()  - Free a pool of descriptors
 * @pool                - pool handle
 */
void knav_pool_destroy(void *ph)
{
        struct knav_pool *pool = ph;

        if (!pool)
                return;

        if (!pool->region)
                return;

        kdesc_empty_pool(pool);
        mutex_lock(&knav_dev_lock);

        pool->region->used_desc -= pool->num_desc;
        list_del(&pool->region_inst);
        list_del(&pool->list);

        mutex_unlock(&knav_dev_lock);
        kfree(pool->name);
        devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);


/**
 * knav_pool_desc_get() - Get a descriptor from the pool
 * @pool                        - pool handle
 *
 * Returns a descriptor from the pool, or ERR_PTR(-ENOMEM) if the pool
 * is empty.
 */
void *knav_pool_desc_get(void *ph)
{
        struct knav_pool *pool = ph;
        dma_addr_t dma;
        unsigned size;
        void *data;

        dma = knav_queue_pop(pool->queue, &size);
        if (unlikely(!dma))
                return ERR_PTR(-ENOMEM);
        data = knav_pool_desc_dma_to_virt(pool, dma);
        return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put() - return a descriptor to the pool
 * @pool                        - pool handle
 */
void knav_pool_desc_put(void *ph, void *desc)
{
        struct knav_pool *pool = ph;
        dma_addr_t dma;
        dma = knav_pool_desc_virt_to_dma(pool, desc);
        knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);
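
/*
 * Typical get/fill/put cycle (fill_desc() is hypothetical). Note that
 * knav_pool_desc_get() reports an empty pool as ERR_PTR(-ENOMEM), so
 * check with IS_ERR(), not against NULL:
 *
 *	void *desc = knav_pool_desc_get(pool);
 *
 *	if (IS_ERR(desc))
 *		return -ENOMEM;
 *	fill_desc(desc);
 *	knav_pool_desc_put(pool, desc);
 */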

/**
 * knav_pool_desc_map() - Map descriptor for DMA transfer
 * @pool                        - pool handle
 * @desc                        - address of descriptor to map
 * @size                        - size of descriptor to map
 * @dma                         - DMA address return pointer
 * @dma_sz                      - adjusted (aligned) size return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
                                        dma_addr_t *dma, unsigned *dma_sz)
{
        struct knav_pool *pool = ph;
        *dma = knav_pool_desc_virt_to_dma(pool, desc);
        size = min(size, pool->region->desc_size);
        size = ALIGN(size, SMP_CACHE_BYTES);
        *dma_sz = size;
        dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

        /* Ensure the descriptor reaches memory */
        __iowmb();

        return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap()       - Unmap descriptor after DMA transfer
 * @pool                        - pool handle
 * @dma                         - DMA address of descriptor to unmap
 * @dma_sz                      - size of descriptor to unmap
 *
 * Returns descriptor address on success. Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
        struct knav_pool *pool = ph;
        unsigned desc_sz;
        void *desc;

        desc_sz = min(dma_sz, pool->region->desc_size);
        desc = knav_pool_desc_dma_to_virt(pool, dma);
        dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
        prefetch(desc);
        return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
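
/*
 * Illustrative map/unmap round trip around a hardware hand-off
 * (tx_queue/rx_queue and desc are hypothetical client state):
 *
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *
 *	knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
 *	knav_queue_push(tx_queue, dma, dma_sz, 0);
 *	...
 *	dma = knav_queue_pop(rx_queue, &dma_sz);
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 */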

/**
 * knav_pool_count()    - Get the number of descriptors in pool.
 * @pool                - pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
        struct knav_pool *pool = ph;
        return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);

static void knav_queue_setup_region(struct knav_device *kdev,
                                        struct knav_region *region)
{
        unsigned hw_num_desc, hw_desc_size, size;
        struct knav_reg_region __iomem  *regs;
        struct knav_qmgr_info *qmgr;
        struct knav_pool *pool;
        int id = region->id;
        struct page *page;

        /* unused region? */
        if (!region->num_desc) {
                dev_warn(kdev->dev, "unused region %s\n", region->name);
                return;
        }

        /* get hardware descriptor value */
        hw_num_desc = ilog2(region->num_desc - 1) + 1;

        /* did we force fit ourselves into nothingness? */
        if (region->num_desc < 32) {
                region->num_desc = 0;
                dev_warn(kdev->dev, "too few descriptors in region %s\n",
                         region->name);
                return;
        }

        size = region->num_desc * region->desc_size;
        region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
                                                GFP_DMA32);
        if (!region->virt_start) {
                region->num_desc = 0;
                dev_err(kdev->dev, "memory alloc failed for region %s\n",
                        region->name);
                return;
        }
        region->virt_end = region->virt_start + size;
        page = virt_to_page(region->virt_start);

        region->dma_start = dma_map_page(kdev->dev, page, 0, size,
                                         DMA_BIDIRECTIONAL);
        if (dma_mapping_error(kdev->dev, region->dma_start)) {
                dev_err(kdev->dev, "dma map failed for region %s\n",
                        region->name);
                goto fail;
        }
        region->dma_end = region->dma_start + size;

        pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
        if (!pool) {
                dev_err(kdev->dev, "out of memory allocating dummy pool\n");
                goto fail;
        }
        pool->num_desc = 0;
        pool->region_offset = region->num_desc;
        list_add(&pool->region_inst, &region->pools);

        dev_dbg(kdev->dev,
                "region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
                region->name, id, region->desc_size, region->num_desc,
                region->link_index, &region->dma_start, &region->dma_end,
                region->virt_start, region->virt_end);

        hw_desc_size = (region->desc_size / 16) - 1;
        hw_num_desc -= 5;

        for_each_qmgr(kdev, qmgr) {
                regs = qmgr->reg_region + id;
                writel_relaxed((u32)region->dma_start, &regs->base);
                writel_relaxed(region->link_index, &regs->start_index);
                writel_relaxed(hw_desc_size << 16 | hw_num_desc,
                               &regs->size_count);
        }
        return;

fail:
        if (region->dma_start)
                dma_unmap_page(kdev->dev, region->dma_start, size,
                                DMA_BIDIRECTIONAL);
        if (region->virt_start)
                free_pages_exact(region->virt_start, size);
        region->num_desc = 0;
        return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
        const char *name;

        if (of_property_read_string(node, "label", &name) < 0)
                name = node->name;
        if (!name)
                name = "unknown";
        return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
                                        struct device_node *regions)
{
        struct device *dev = kdev->dev;
        struct knav_region *region;
        struct device_node *child;
        u32 temp[2];
        int ret;

        for_each_child_of_node(regions, child) {
                region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
                if (!region) {
                        dev_err(dev, "out of memory allocating region\n");
                        return -ENOMEM;
                }

                region->name = knav_queue_find_name(child);
                of_property_read_u32(child, "id", &region->id);
                ret = of_property_read_u32_array(child, "region-spec", temp, 2);
                if (!ret) {
                        region->num_desc  = temp[0];
                        region->desc_size = temp[1];
                } else {
                        dev_err(dev, "invalid region info %s\n", region->name);
                        devm_kfree(dev, region);
                        continue;
                }

                if (!of_get_property(child, "link-index", NULL)) {
                        dev_err(dev, "No link info for %s\n", region->name);
                        devm_kfree(dev, region);
                        continue;
                }
                ret = of_property_read_u32(child, "link-index",
                                           &region->link_index);
                if (ret) {
                        dev_err(dev, "link index not found for %s\n",
                                region->name);
                        devm_kfree(dev, region);
                        continue;
                }

                INIT_LIST_HEAD(&region->pools);
                list_add_tail(&region->list, &kdev->regions);
        }
        if (list_empty(&kdev->regions)) {
                dev_err(dev, "no valid region information found\n");
                return -ENODEV;
        }

        /* Next, we run through the regions and set things up */
        for_each_region(kdev, region)
                knav_queue_setup_region(kdev, region);

        return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
                                       const char *name,
                                       struct knav_link_ram_block *block)
{
        struct platform_device *pdev = to_platform_device(kdev->dev);
        struct device_node *node = pdev->dev.of_node;
        u32 temp[2];

        /*
         * Note: link ram resources are specified in "entry" sized units. In
         * reality, although entries are ~40 bits in hardware, we treat them
         * as 64-bit entities here.
         *
         * For example, to specify the internal link ram for Keystone-I class
         * devices, we would set the linkram0 resource to 0x80000-0x83fff.
         *
         * This gets a bit weird when other link rams are used.  For example,
         * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
         * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
         * which accounts for 64 bits per entry, for 16K entries.
         */
        if (!of_property_read_u32_array(node, name, temp, 2)) {
                if (temp[0]) {
                        /*
                         * queue_base specified => using internal or onchip
                         * link ram WARNING - we do not "reserve" this block
                         */
                        block->dma = (dma_addr_t)temp[0];
                        block->virt = NULL;
                        block->size = temp[1];
                } else {
                        block->size = temp[1];
                        /* queue_base not specified => allocate requested size */
                        block->virt = dmam_alloc_coherent(kdev->dev,
                                                  8 * block->size, &block->dma,
                                                  GFP_KERNEL);
                        if (!block->virt) {
                                dev_err(kdev->dev, "failed to alloc linkram\n");
                                return -ENOMEM;
                        }
                }
        } else {
                return -ENODEV;
        }
        return 0;
}
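
/*
 * Illustrative device-tree values for the two cases above (base
 * addresses hypothetical; the format is <base size-in-entries>):
 *
 *	linkram0 = <0x00080000 0x3fff>;     base given: use on-chip RAM
 *	linkram1 = <0x00000000 0x10000>;    base 0: allocated by the driver
 */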

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
        struct knav_link_ram_block *block;
        struct knav_qmgr_info *qmgr;

        for_each_qmgr(kdev, qmgr) {
                block = &kdev->link_rams[0];
                dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
                        &block->dma, block->virt, block->size);
                writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
                writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);

                block++;
                if (!block->size)
                        continue;

                dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
                        &block->dma, block->virt, block->size);
                writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
        }

        return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
                                        struct device_node *node)
{
        struct device *dev = kdev->dev;
        struct knav_range_info *range;
        struct knav_qmgr_info *qmgr;
        u32 temp[2], start, end, id, index;
        int ret, i;

        range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
        if (!range) {
                dev_err(dev, "out of memory allocating range\n");
                return -ENOMEM;
        }

        range->kdev = kdev;
        range->name = knav_queue_find_name(node);
        ret = of_property_read_u32_array(node, "qrange", temp, 2);
        if (!ret) {
                range->queue_base = temp[0] - kdev->base_id;
                range->num_queues = temp[1];
        } else {
                dev_err(dev, "invalid queue range %s\n", range->name);
                devm_kfree(dev, range);
                return -EINVAL;
        }

        for (i = 0; i < RANGE_MAX_IRQS; i++) {
                struct of_phandle_args oirq;

                if (of_irq_parse_one(node, i, &oirq))
                        break;

                range->irqs[i].irq = irq_create_of_mapping(&oirq);
                if (range->irqs[i].irq == IRQ_NONE)
                        break;

                range->num_irqs++;

                if (oirq.args_count == 3)
                        range->irqs[i].cpu_map =
                                (oirq.args[2] & 0x0000ff00) >> 8;
        }

        range->num_irqs = min(range->num_irqs, range->num_queues);
        if (range->num_irqs)
                range->flags |= RANGE_HAS_IRQ;

        if (of_get_property(node, "qalloc-by-id", NULL))
                range->flags |= RANGE_RESERVED;

        if (of_get_property(node, "accumulator", NULL)) {
                ret = knav_init_acc_range(kdev, node, range);
                if (ret < 0) {
                        devm_kfree(dev, range);
                        return ret;
                }
        } else {
                range->ops = &knav_gp_range_ops;
        }

        /* set threshold to 1, and flush out the queues */
        for_each_qmgr(kdev, qmgr) {
                start = max(qmgr->start_queue, range->queue_base);
                end   = min(qmgr->start_queue + qmgr->num_queues,
                            range->queue_base + range->num_queues);
                for (id = start; id < end; id++) {
                        index = id - qmgr->start_queue;
                        writel_relaxed(THRESH_GTE | 1,
                                       &qmgr->reg_peek[index].ptr_size_thresh);
                        writel_relaxed(0,
                                       &qmgr->reg_push[index].ptr_size_thresh);
                }
        }

        list_add_tail(&range->list, &kdev->queue_ranges);
        dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
                range->name, range->queue_base,
                range->queue_base + range->num_queues - 1,
                range->num_irqs,
                (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
                (range->flags & RANGE_RESERVED) ? ", reserved" : "",
                (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
        kdev->num_queues_in_use += range->num_queues;
        return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
                                   struct device_node *queue_pools)
{
        struct device_node *type, *range;
        int ret;

        for_each_child_of_node(queue_pools, type) {
                for_each_child_of_node(type, range) {
                        ret = knav_setup_queue_range(kdev, range);
                        /* return value ignored, we init the rest... */
                }
        }

        /* ... and barf if they all failed! */
        if (list_empty(&kdev->queue_ranges)) {
                dev_err(kdev->dev, "no valid queue range found\n");
                return -ENODEV;
        }
        return 0;
}

static void knav_free_queue_range(struct knav_device *kdev,
                                  struct knav_range_info *range)
{
        if (range->ops && range->ops->free_range)
                range->ops->free_range(range);
        list_del(&range->list);
        devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
        struct knav_range_info *range;

        for (;;) {
                range = first_queue_range(kdev);
                if (!range)
                        break;
                knav_free_queue_range(kdev, range);
        }
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
        struct knav_region *region;
        struct knav_pool *pool, *tmp;
        unsigned size;

        for (;;) {
                region = first_region(kdev);
                if (!region)
                        break;
                list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
                        knav_pool_destroy(pool);

                size = region->virt_end - region->virt_start;
                if (size)
                        free_pages_exact(region->virt_start, size);
                list_del(&region->list);
                devm_kfree(kdev->dev, region);
        }
}

static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
                                        struct device_node *node, int index)
{
        struct resource res;
        void __iomem *regs;
        int ret;

        ret = of_address_to_resource(node, index, &res);
        if (ret) {
                dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
                        node->name, index);
                return ERR_PTR(ret);
        }

        regs = devm_ioremap_resource(kdev->dev, &res);
        if (IS_ERR(regs))
                dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
                        index, node->name);
        return regs;
}

static int knav_queue_init_qmgrs(struct knav_device *kdev,
                                        struct device_node *qmgrs)
{
        struct device *dev = kdev->dev;
        struct knav_qmgr_info *qmgr;
        struct device_node *child;
        u32 temp[2];
        int ret;

        for_each_child_of_node(qmgrs, child) {
                qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
                if (!qmgr) {
                        dev_err(dev, "out of memory allocating qmgr\n");
                        return -ENOMEM;
                }

                ret = of_property_read_u32_array(child, "managed-queues",
                                                 temp, 2);
                if (!ret) {
                        qmgr->start_queue = temp[0];
                        qmgr->num_queues = temp[1];
                } else {
                        dev_err(dev, "invalid qmgr queue range\n");
                        devm_kfree(dev, qmgr);
                        continue;
                }

                dev_info(dev, "qmgr start queue %d, number of queues %d\n",
                         qmgr->start_queue, qmgr->num_queues);

                qmgr->reg_peek =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_PEEK_REG_INDEX);
                qmgr->reg_status =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_STATUS_REG_INDEX);
                qmgr->reg_config =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_CONFIG_REG_INDEX);
                qmgr->reg_region =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_REGION_REG_INDEX);
                qmgr->reg_push =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_PUSH_REG_INDEX);
                qmgr->reg_pop =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_POP_REG_INDEX);

                if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) ||
                    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
                    IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) {
                        dev_err(dev, "failed to map qmgr regs\n");
                        if (!IS_ERR(qmgr->reg_peek))
                                devm_iounmap(dev, qmgr->reg_peek);
                        if (!IS_ERR(qmgr->reg_status))
                                devm_iounmap(dev, qmgr->reg_status);
                        if (!IS_ERR(qmgr->reg_config))
                                devm_iounmap(dev, qmgr->reg_config);
                        if (!IS_ERR(qmgr->reg_region))
                                devm_iounmap(dev, qmgr->reg_region);
                        if (!IS_ERR(qmgr->reg_push))
                                devm_iounmap(dev, qmgr->reg_push);
                        if (!IS_ERR(qmgr->reg_pop))
                                devm_iounmap(dev, qmgr->reg_pop);
                        devm_kfree(dev, qmgr);
                        continue;
                }

                list_add_tail(&qmgr->list, &kdev->qmgrs);
                dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
                         qmgr->start_queue, qmgr->num_queues,
                         qmgr->reg_peek, qmgr->reg_status,
                         qmgr->reg_config, qmgr->reg_region,
                         qmgr->reg_push, qmgr->reg_pop);
        }
        return 0;
}

static int knav_queue_init_pdsps(struct knav_device *kdev,
                                        struct device_node *pdsps)
{
        struct device *dev = kdev->dev;
        struct knav_pdsp_info *pdsp;
        struct device_node *child;

        for_each_child_of_node(pdsps, child) {
                pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
                if (!pdsp) {
                        dev_err(dev, "out of memory allocating pdsp\n");
                        return -ENOMEM;
                }
                pdsp->name = knav_queue_find_name(child);
                pdsp->iram =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
                pdsp->regs =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_PDSP_REGS_REG_INDEX);
                pdsp->intd =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_PDSP_INTD_REG_INDEX);
                pdsp->command =
                        knav_queue_map_reg(kdev, child,
                                           KNAV_QUEUE_PDSP_CMD_REG_INDEX);

                if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
                    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
                        dev_err(dev, "failed to map pdsp %s regs\n",
                                pdsp->name);
                        if (!IS_ERR(pdsp->command))
                                devm_iounmap(dev, pdsp->command);
                        if (!IS_ERR(pdsp->iram))
                                devm_iounmap(dev, pdsp->iram);
                        if (!IS_ERR(pdsp->regs))
                                devm_iounmap(dev, pdsp->regs);
                        if (!IS_ERR(pdsp->intd))
                                devm_iounmap(dev, pdsp->intd);
                        devm_kfree(dev, pdsp);
                        continue;
                }
                of_property_read_u32(child, "id", &pdsp->id);
                list_add_tail(&pdsp->list, &kdev->pdsps);
                dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
                        pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
                        pdsp->intd);
        }
        return 0;
}
1492
1493static int knav_queue_stop_pdsp(struct knav_device *kdev,
1494                          struct knav_pdsp_info *pdsp)
1495{
1496        u32 val, timeout = 1000;
1497        int ret;
1498
1499        val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1500        writel_relaxed(val, &pdsp->regs->control);
1501        ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1502                                        PDSP_CTRL_RUNNING);
1503        if (ret < 0) {
1504                dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1505                return ret;
1506        }
1507        pdsp->loaded = false;
1508        pdsp->started = false;
1509        return 0;
1510}
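
/*
 * knav_queue_pdsp_wait() is defined earlier in this file; below is a
 * minimal sketch consistent with how it is called here -- poll the
 * register until it reads zero under the given bit mask (or reads zero
 * outright when the mask is 0), giving up after roughly timeout
 * milliseconds. Treat this as illustrative, not the exact implementation:
 */
static int example_pdsp_wait(u32 __iomem *addr, unsigned timeout, u32 flags)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout);
        u32 val = 0;

        while (time_after(end, jiffies)) {
                val = readl_relaxed(addr);
                if (flags)
                        val &= flags;   /* only these bits must clear */
                if (!val)
                        break;          /* condition met */
                cpu_relax();
        }
        return val ? -ETIMEDOUT : 0;
}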
1511
1512static int knav_queue_load_pdsp(struct knav_device *kdev,
1513                          struct knav_pdsp_info *pdsp)
1514{
1515        int i, ret, fwlen;
1516        const struct firmware *fw;
1517        bool found = false;
1518        u32 *fwdata;
1519
1520        for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1521                if (knav_acc_firmwares[i]) {
1522                        ret = request_firmware_direct(&fw,
1523                                                      knav_acc_firmwares[i],
1524                                                      kdev->dev);
1525                        if (!ret) {
1526                                found = true;
1527                                break;
1528                        }
1529                }
1530        }
1531
1532        if (!found) {
1533                dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1534                return -ENODEV;
1535        }
1536
1537        dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1538                 knav_acc_firmwares[i]);
1539
1540        writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1541        /* download the firmware */
1542        fwdata = (u32 *)fw->data;
1543        fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1544        for (i = 0; i < fwlen; i++)
1545                writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1546
1547        release_firmware(fw);
1548        return 0;
1549}
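
/*
 * Note on the download loop above: PDSP firmware images are streams of
 * big-endian 32-bit words, so each word goes through be32_to_cpu() before
 * it is written into IRAM, and fwlen rounds the byte size up to a whole
 * number of words. The write of pdsp->id + 1 into the command area at
 * offset 0x18 happens before the copy; its exact meaning is defined by
 * the firmware.
 */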
1550
1551static int knav_queue_start_pdsp(struct knav_device *kdev,
1552                           struct knav_pdsp_info *pdsp)
1553{
1554        u32 val, timeout = 1000;
1555        int ret;
1556
1557        /* write a command for sync */
1558        writel_relaxed(0xffffffff, pdsp->command);
1559        while (readl_relaxed(pdsp->command) != 0xffffffff)
1560                cpu_relax();
1561
1562        /* soft reset the PDSP */
1563        val  = readl_relaxed(&pdsp->regs->control);
1564        val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1565        writel_relaxed(val, &pdsp->regs->control);
1566
1567        /* enable pdsp */
1568        val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1569        writel_relaxed(val, &pdsp->regs->control);
1570
1571        /* wait for command register to clear */
1572        ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1573        if (ret < 0) {
1574                dev_err(kdev->dev,
1575                        "timed out on pdsp %s command register wait\n",
1576                        pdsp->name);
1577                return ret;
1578        }
1579        return 0;
1580}
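
/*
 * The start sequence above is a small handshake: write an all-ones sync
 * word to the command register (reading it back until the posted write has
 * landed), take the PDSP out of soft reset with its program-counter field
 * cleared, then set the enable bit. The running firmware is expected to
 * clear the command word, which is exactly what the final
 * knav_queue_pdsp_wait(pdsp->command, timeout, 0) polls for.
 */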
1581
1582static void knav_queue_stop_pdsps(struct knav_device *kdev)
1583{
1584        struct knav_pdsp_info *pdsp;
1585
1586        /* disable all pdsps */
1587        for_each_pdsp(kdev, pdsp)
1588                knav_queue_stop_pdsp(kdev, pdsp);
1589}
1590
1591static int knav_queue_start_pdsps(struct knav_device *kdev)
1592{
1593        struct knav_pdsp_info *pdsp;
1594        int ret;
1595
1596        knav_queue_stop_pdsps(kdev);
1597        /* now load them all. We return success even if a pdsp fails to
1598         * load, because the accumulator channels are optional and depend
1599         * on firmware being available in the system. We record the loaded
1600         * and started flags here; when an acc range is initialized later,
1601         * it is set up only if its pdsp actually started.
1602         */
1603        for_each_pdsp(kdev, pdsp) {
1604                ret = knav_queue_load_pdsp(kdev, pdsp);
1605                if (!ret)
1606                        pdsp->loaded = true;
1607        }
1608
1609        for_each_pdsp(kdev, pdsp) {
1610                if (pdsp->loaded) {
1611                        ret = knav_queue_start_pdsp(kdev, pdsp);
1612                        if (!ret)
1613                                pdsp->started = true;
1614                }
1615        }
1616        return 0;
1617}
1618
1619static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1620{
1621        struct knav_qmgr_info *qmgr;
1622
1623        for_each_qmgr(kdev, qmgr) {
1624                if ((id >= qmgr->start_queue) &&
1625                    (id < qmgr->start_queue + qmgr->num_queues))
1626                        return qmgr;
1627        }
1628        return NULL;
1629}
1630
1631static int knav_queue_init_queue(struct knav_device *kdev,
1632                                        struct knav_range_info *range,
1633                                        struct knav_queue_inst *inst,
1634                                        unsigned id)
1635{
1636        char irq_name[KNAV_NAME_SIZE];
1637        inst->qmgr = knav_find_qmgr(id);
1638        if (!inst->qmgr)
1639                return -ENODEV;
1640
1641        INIT_LIST_HEAD(&inst->handles);
1642        inst->kdev = kdev;
1643        inst->range = range;
1644        inst->irq_num = -1;
1645        inst->id = id;
1646        scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1647        inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
            if (!inst->irq_name)
                    return -ENOMEM;
1648
1649        if (range->ops && range->ops->init_queue)
1650                return range->ops->init_queue(range, inst);
1651        else
1652                return 0;
1653}
1654
1655static int knav_queue_init_queues(struct knav_device *kdev)
1656{
1657        struct knav_range_info *range;
1658        int size, id, base_idx;
1659        int idx = 0, ret = 0;
1660
1661        /* how much do we need for instance data? */
1662        size = sizeof(struct knav_queue_inst);
1663
1664        /* round this up to a power of 2 to keep the index-to-instance
1665         * arithmetic fast
1666         */
1667        kdev->inst_shift = order_base_2(size);
1668        size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1669        kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1670        if (!kdev->instances)
1671                return -ENOMEM;
1672
1673        for_each_queue_range(kdev, range) {
1674                if (range->ops && range->ops->init_range)
1675                        range->ops->init_range(range);
1676                base_idx = idx;
1677                for (id = range->queue_base;
1678                     id < range->queue_base + range->num_queues; id++, idx++) {
1679                        ret = knav_queue_init_queue(kdev, range,
1680                                        knav_queue_idx_to_inst(kdev, idx), id);
1681                        if (ret < 0)
1682                                return ret;
1683                }
1684                range->queue_base_inst =
1685                        knav_queue_idx_to_inst(kdev, base_idx);
1686        }
1687        return 0;
1688}
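
/*
 * Worked example of the sizing above (the numbers are hypothetical): if
 * sizeof(struct knav_queue_inst) came to 72 bytes, order_base_2(72) is 7,
 * so every instance occupies a 128-byte slot and
 * knav_queue_idx_to_inst(kdev, idx) reduces to instances + (idx << 7) --
 * a shift rather than a multiply on every index-to-instance conversion.
 */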
1689
1690static int knav_queue_probe(struct platform_device *pdev)
1691{
1692        struct device_node *node = pdev->dev.of_node;
1693        struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
1694        struct device *dev = &pdev->dev;
1695        u32 temp[2];
1696        int ret;
1697
1698        if (!node) {
1699                dev_err(dev, "device tree info unavailable\n");
1700                return -ENODEV;
1701        }
1702
1703        kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1704        if (!kdev)
1705                return -ENOMEM;
1708
1709        platform_set_drvdata(pdev, kdev);
1710        kdev->dev = dev;
1711        INIT_LIST_HEAD(&kdev->queue_ranges);
1712        INIT_LIST_HEAD(&kdev->qmgrs);
1713        INIT_LIST_HEAD(&kdev->pools);
1714        INIT_LIST_HEAD(&kdev->regions);
1715        INIT_LIST_HEAD(&kdev->pdsps);
1716
1717        pm_runtime_enable(&pdev->dev);
1718        ret = pm_runtime_get_sync(&pdev->dev);
1719        if (ret < 0) {
                    pm_runtime_put_noidle(&pdev->dev);
                    pm_runtime_disable(&pdev->dev);
1720                dev_err(dev, "Failed to enable QMSS\n");
1721                return ret;
1722        }
1723
1724        if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1725                dev_err(dev, "queue-range not specified\n");
1726                ret = -ENODEV;
1727                goto err;
1728        }
1729        kdev->base_id    = temp[0];
1730        kdev->num_queues = temp[1];
1731
1732        /* Initialize queue managers using device tree configuration */
1733        qmgrs = of_get_child_by_name(node, "qmgrs");
1734        if (!qmgrs) {
1735                dev_err(dev, "queue manager info not specified\n");
1736                ret = -ENODEV;
1737                goto err;
1738        }
1739        ret = knav_queue_init_qmgrs(kdev, qmgrs);
1740        of_node_put(qmgrs);
1741        if (ret)
1742                goto err;
1743
1744        /* get pdsp configuration values from device tree */
1745        pdsps = of_get_child_by_name(node, "pdsps");
1746        if (pdsps) {
1747                ret = knav_queue_init_pdsps(kdev, pdsps);
1748                if (!ret)
1749                        ret = knav_queue_start_pdsps(kdev);
1750        }
1751        of_node_put(pdsps);
            if (ret)
                    goto err;
1756
1757        /* get usable queue range values from device tree */
1758        queue_pools = of_get_child_by_name(node, "queue-pools");
1759        if (!queue_pools) {
1760                dev_err(dev, "queue-pools not specified\n");
1761                ret = -ENODEV;
1762                goto err;
1763        }
1764        ret = knav_setup_queue_pools(kdev, queue_pools);
1765        of_node_put(queue_pools);
1766        if (ret)
1767                goto err;
1768
1769        ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1770        if (ret) {
1771                dev_err(kdev->dev, "could not setup linking ram\n");
1772                goto err;
1773        }
1774
1775        ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1776        if (ret) {
1777                /*
1778                 * the second linking RAM is optional; with just one we
1779                 * simply live within our means and use linkram0 alone
1780                 */
1781        }
1782
1783        ret = knav_queue_setup_link_ram(kdev);
1784        if (ret)
1785                goto err;
1786
1787        regions = of_get_child_by_name(node, "descriptor-regions");
1788        if (!regions) {
1789                dev_err(dev, "descriptor-regions not specified\n");
                    ret = -ENODEV;
1790                goto err;
1791        }
1792        ret = knav_queue_setup_regions(kdev, regions);
1793        of_node_put(regions);
1794        if (ret)
1795                goto err;
1796
1797        ret = knav_queue_init_queues(kdev);
1798        if (ret < 0) {
1799                dev_err(dev, "hwqueue initialization failed\n");
1800                goto err;
1801        }
1802
1803        debugfs_create_file("qmss", 0444, NULL, NULL,
1804                            &knav_queue_debug_ops);
1805        return 0;
1806
1807err:
1808        knav_queue_stop_pdsps(kdev);
1809        knav_queue_free_regions(kdev);
1810        knav_free_queue_ranges(kdev);
1811        pm_runtime_put_sync(&pdev->dev);
1812        pm_runtime_disable(&pdev->dev);
1813        return ret;
1814}
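
/*
 * For orientation, probe expects a device tree layout roughly like the
 * abbreviated, illustrative sketch below; the authoritative binding is
 * Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt:
 *
 *      qmss: qmss@2a40000 {
 *              compatible = "ti,keystone-navigator-qmss";
 *              queue-range = <0 0x4000>;       // base id, number of queues
 *              linkram0 = <0x100000 0x8000>;   // address, size
 *              linkram1 = <0x0 0x10000>;
 *              qmgrs { ... };                  // required
 *              queue-pools { ... };            // required
 *              descriptor-regions { ... };     // required
 *              pdsps { ... };                  // optional accumulator PDSPs
 *      };
 */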
1815
1816static int knav_queue_remove(struct platform_device *pdev)
1817{
1818        /* TODO: Free resources */
1819        pm_runtime_put_sync(&pdev->dev);
1820        pm_runtime_disable(&pdev->dev);
1821        return 0;
1822}
1823
1824/* Match table for of_platform binding */
1825static const struct of_device_id keystone_qmss_of_match[] = {
1826        { .compatible = "ti,keystone-navigator-qmss", },
1827        {},
1828};
1829MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1830
1831static struct platform_driver keystone_qmss_driver = {
1832        .probe          = knav_queue_probe,
1833        .remove         = knav_queue_remove,
1834        .driver         = {
1835                .name   = "keystone-navigator-qmss",
1836                .of_match_table = keystone_qmss_of_match,
1837        },
1838};
1839module_platform_driver(keystone_qmss_driver);
1840
1841MODULE_LICENSE("GPL v2");
1842MODULE_DESCRIPTION("TI QMSS driver for Keystone SoCs");
1843MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1844MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
1845