/*
 * TI QMSS (Queue Manager SubSystem) driver for Keystone SOCs
 *
 * Copyright (C) Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Licensed under the GNU General Public License v2 (GPL v2).
 */
19#include <linux/debugfs.h>
20#include <linux/dma-mapping.h>
21#include <linux/firmware.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/module.h>
25#include <linux/of_address.h>
26#include <linux/of_device.h>
27#include <linux/of_irq.h>
28#include <linux/pm_runtime.h>
29#include <linux/slab.h>
30#include <linux/soc/ti/knav_qmss.h>
31
32#include "knav_qmss.h"
33
34static struct knav_device *kdev;
35static DEFINE_MUTEX(knav_dev_lock);

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
39#define KNAV_QUEUE_STATUS_REG_INDEX 1
40#define KNAV_QUEUE_CONFIG_REG_INDEX 2
41#define KNAV_QUEUE_REGION_REG_INDEX 3
42#define KNAV_QUEUE_PUSH_REG_INDEX 4
43#define KNAV_QUEUE_POP_REG_INDEX 5

/*
 * Queue manager register indices in DTS for QMSS in K2G NAVSS.
 * This version has no separate status or pop register space (the push
 * registers double as pop), so the indices above 0 are remapped here.
 */
#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
51#define KNAV_L_QUEUE_REGION_REG_INDEX 2
52#define KNAV_L_QUEUE_PUSH_REG_INDEX 3
53

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
56#define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1
57#define KNAV_QUEUE_PDSP_INTD_REG_INDEX 2
58#define KNAV_QUEUE_PDSP_CMD_REG_INDEX 3
59
60#define knav_queue_idx_to_inst(kdev, idx) \
61 (kdev->instances + (idx << kdev->inst_shift))
62
63#define for_each_handle_rcu(qh, inst) \
64 list_for_each_entry_rcu(qh, &inst->handles, list)
65
66#define for_each_instance(idx, inst, kdev) \
67 for (idx = 0, inst = kdev->instances; \
68 idx < (kdev)->num_queues_in_use; \
69 idx++, inst = knav_queue_idx_to_inst(kdev, idx))
70

/* accumulator PDSP firmware names, tried in order */
const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
76
77static bool device_ready;
78bool knav_qmss_device_ready(void)
79{
80 return device_ready;
81}
82EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
88
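/**
 * knav_queue_notify() - notify registered handles on a queue instance
 * @inst:	hardware queue instance
 *
 * Walks the RCU-protected list of handles open on this instance and calls
 * the notifier callback of every handle that has notifications enabled.
 */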
89void knav_queue_notify(struct knav_queue_inst *inst)
90{
91 struct knav_queue *qh;
92
93 if (!inst)
94 return;
95
96 rcu_read_lock();
97 for_each_handle_rcu(qh, inst) {
98 if (atomic_read(&qh->notifier_enabled) <= 0)
99 continue;
100 if (WARN_ON(!qh->notifier_fn))
101 continue;
102 this_cpu_inc(qh->stats->notifies);
103 qh->notifier_fn(qh->notifier_fn_arg);
104 }
105 rcu_read_unlock();
106}
107EXPORT_SYMBOL_GPL(knav_queue_notify);
108
109static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
110{
111 struct knav_queue_inst *inst = _instdata;
112
113 knav_queue_notify(inst);
114 return IRQ_HANDLED;
115}
116
117static int knav_queue_setup_irq(struct knav_range_info *range,
118 struct knav_queue_inst *inst)
119{
120 unsigned queue = inst->id - range->queue_base;
121 int ret = 0, irq;
122
123 if (range->flags & RANGE_HAS_IRQ) {
124 irq = range->irqs[queue].irq;
125 ret = request_irq(irq, knav_queue_int_handler, 0,
126 inst->irq_name, inst);
127 if (ret)
128 return ret;
129 disable_irq(irq);
130 if (range->irqs[queue].cpu_mask) {
131 ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
132 if (ret) {
133 dev_warn(range->kdev->dev,
134 "Failed to set IRQ affinity\n");
135 return ret;
136 }
137 }
138 }
139 return ret;
140}
141
142static void knav_queue_free_irq(struct knav_queue_inst *inst)
143{
144 struct knav_range_info *range = inst->range;
145 unsigned queue = inst->id - inst->range->queue_base;
146 int irq;
147
148 if (range->flags & RANGE_HAS_IRQ) {
149 irq = range->irqs[queue].irq;
150 irq_set_affinity_hint(irq, NULL);
151 free_irq(irq, inst);
152 }
153}
154
155static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
156{
157 return !list_empty(&inst->handles);
158}
159
160static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
161{
162 return inst->range->flags & RANGE_RESERVED;
163}
164
165static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
166{
167 struct knav_queue *tmp;
168
169 rcu_read_lock();
170 for_each_handle_rcu(tmp, inst) {
171 if (tmp->flags & KNAV_QUEUE_SHARED) {
172 rcu_read_unlock();
173 return true;
174 }
175 }
176 rcu_read_unlock();
177 return false;
178}
179
180static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
181 unsigned type)
182{
183 if ((type == KNAV_QUEUE_QPEND) &&
184 (inst->range->flags & RANGE_HAS_IRQ)) {
185 return true;
186 } else if ((type == KNAV_QUEUE_ACC) &&
187 (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
188 return true;
189 } else if ((type == KNAV_QUEUE_GP) &&
190 !(inst->range->flags &
191 (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
192 return true;
193 }
194 return false;
195}
196
197static inline struct knav_queue_inst *
198knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
199{
200 struct knav_queue_inst *inst;
201 int idx;
202
203 for_each_instance(idx, inst, kdev) {
204 if (inst->id == id)
205 return inst;
206 }
207 return NULL;
208}
209
210static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
211{
212 if (kdev->base_id <= id &&
213 kdev->base_id + kdev->num_queues > id) {
214 id -= kdev->base_id;
215 return knav_queue_match_id_to_inst(kdev, id);
216 }
217 return NULL;
218}
219
220static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
221 const char *name, unsigned flags)
222{
223 struct knav_queue *qh;
224 unsigned id;
225 int ret = 0;
226
227 qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
228 if (!qh)
229 return ERR_PTR(-ENOMEM);
230
231 qh->stats = alloc_percpu(struct knav_queue_stats);
232 if (!qh->stats) {
233 ret = -ENOMEM;
234 goto err;
235 }
236
237 qh->flags = flags;
238 qh->inst = inst;
239 id = inst->id - inst->qmgr->start_queue;
240 qh->reg_push = &inst->qmgr->reg_push[id];
241 qh->reg_pop = &inst->qmgr->reg_pop[id];
242 qh->reg_peek = &inst->qmgr->reg_peek[id];
243
244
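	/* on the first open of this instance, name it and do range specific setup */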
245 if (!knav_queue_is_busy(inst)) {
246 struct knav_range_info *range = inst->range;
247
248 inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
249 if (range->ops && range->ops->open_queue)
250 ret = range->ops->open_queue(range, inst, flags);
251
252 if (ret)
253 goto err;
254 }
255 list_add_tail_rcu(&qh->list, &inst->handles);
256 return qh;
257
258err:
259 if (qh->stats)
260 free_percpu(qh->stats);
261 devm_kfree(inst->kdev->dev, qh);
262 return ERR_PTR(ret);
263}
264
265static struct knav_queue *
266knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
267{
268 struct knav_queue_inst *inst;
269 struct knav_queue *qh;
270
271 mutex_lock(&knav_dev_lock);
272
273 qh = ERR_PTR(-ENODEV);
274 inst = knav_queue_find_by_id(id);
275 if (!inst)
276 goto unlock_ret;
277
278 qh = ERR_PTR(-EEXIST);
279 if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
280 goto unlock_ret;
281
282 qh = ERR_PTR(-EBUSY);
283 if ((flags & KNAV_QUEUE_SHARED) &&
284 (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
285 goto unlock_ret;
286
287 qh = __knav_queue_open(inst, name, flags);
288
289unlock_ret:
290 mutex_unlock(&knav_dev_lock);
291
292 return qh;
293}
294
295static struct knav_queue *knav_queue_open_by_type(const char *name,
296 unsigned type, unsigned flags)
297{
298 struct knav_queue_inst *inst;
299 struct knav_queue *qh = ERR_PTR(-EINVAL);
300 int idx;
301
302 mutex_lock(&knav_dev_lock);
303
304 for_each_instance(idx, inst, kdev) {
305 if (knav_queue_is_reserved(inst))
306 continue;
307 if (!knav_queue_match_type(inst, type))
308 continue;
309 if (knav_queue_is_busy(inst))
310 continue;
311 qh = __knav_queue_open(inst, name, flags);
312 goto unlock_ret;
313 }
314
315unlock_ret:
316 mutex_unlock(&knav_dev_lock);
317 return qh;
318}
319
320static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
321{
322 struct knav_range_info *range = inst->range;
323
324 if (range->ops && range->ops->set_notify)
325 range->ops->set_notify(range, inst, enabled);
326}
327
328static int knav_queue_enable_notifier(struct knav_queue *qh)
329{
330 struct knav_queue_inst *inst = qh->inst;
331 bool first;
332
333 if (WARN_ON(!qh->notifier_fn))
334 return -EINVAL;
335
336
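	/* adjust the per-handle notifier count first */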
337 first = (atomic_inc_return(&qh->notifier_enabled) == 1);
338 if (!first)
339 return 0;
340
341
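	/* now adjust the per-instance notifier count */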
342 first = (atomic_inc_return(&inst->num_notifiers) == 1);
343 if (first)
344 knav_queue_set_notify(inst, true);
345
346 return 0;
347}
348
349static int knav_queue_disable_notifier(struct knav_queue *qh)
350{
351 struct knav_queue_inst *inst = qh->inst;
352 bool last;
353
354 last = (atomic_dec_return(&qh->notifier_enabled) == 0);
355 if (!last)
356 return 0;
357
358 last = (atomic_dec_return(&inst->num_notifiers) == 0);
359 if (last)
360 knav_queue_set_notify(inst, false);
361
362 return 0;
363}
364
365static int knav_queue_set_notifier(struct knav_queue *qh,
366 struct knav_queue_notify_config *cfg)
367{
368 knav_queue_notify_fn old_fn = qh->notifier_fn;
369
370 if (!cfg)
371 return -EINVAL;
372
373 if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
374 return -ENOTSUPP;
375
376 if (!cfg->fn && old_fn)
377 knav_queue_disable_notifier(qh);
378
379 qh->notifier_fn = cfg->fn;
380 qh->notifier_fn_arg = cfg->fn_arg;
381
382 if (cfg->fn && !old_fn)
383 knav_queue_enable_notifier(qh);
384
385 return 0;
386}
387
388static int knav_gp_set_notify(struct knav_range_info *range,
389 struct knav_queue_inst *inst,
390 bool enabled)
391{
392 unsigned queue;
393
394 if (range->flags & RANGE_HAS_IRQ) {
395 queue = inst->id - range->queue_base;
396 if (enabled)
397 enable_irq(range->irqs[queue].irq);
398 else
399 disable_irq_nosync(range->irqs[queue].irq);
400 }
401 return 0;
402}
403
404static int knav_gp_open_queue(struct knav_range_info *range,
405 struct knav_queue_inst *inst, unsigned flags)
406{
407 return knav_queue_setup_irq(range, inst);
408}
409
410static int knav_gp_close_queue(struct knav_range_info *range,
411 struct knav_queue_inst *inst)
412{
413 knav_queue_free_irq(inst);
414 return 0;
415}
416
417struct knav_range_ops knav_gp_range_ops = {
418 .set_notify = knav_gp_set_notify,
419 .open_queue = knav_gp_open_queue,
420 .close_queue = knav_gp_close_queue,
421};
422
423
424static int knav_queue_get_count(void *qhandle)
425{
426 struct knav_queue *qh = qhandle;
427 struct knav_queue_inst *inst = qh->inst;
428
429 return readl_relaxed(&qh->reg_peek[0].entry_count) +
430 atomic_read(&inst->desc_count);
431}
432
433static void knav_queue_debug_show_instance(struct seq_file *s,
434 struct knav_queue_inst *inst)
435{
436 struct knav_device *kdev = inst->kdev;
437 struct knav_queue *qh;
438 int cpu = 0;
439 int pushes = 0;
440 int pops = 0;
441 int push_errors = 0;
442 int pop_errors = 0;
443 int notifies = 0;
444
445 if (!knav_queue_is_busy(inst))
446 return;
447
448 seq_printf(s, "\tqueue id %d (%s)\n",
449 kdev->base_id + inst->id, inst->name);
450 for_each_handle_rcu(qh, inst) {
451 for_each_possible_cpu(cpu) {
452 pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
453 pops += per_cpu_ptr(qh->stats, cpu)->pops;
454 push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
455 pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
456 notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
457 }
458
459 seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
460 qh,
461 pushes,
462 pops,
463 knav_queue_get_count(qh),
464 notifies,
465 push_errors,
466 pop_errors);
467 }
468}
469
470static int knav_queue_debug_show(struct seq_file *s, void *v)
471{
472 struct knav_queue_inst *inst;
473 int idx;
474
475 mutex_lock(&knav_dev_lock);
476 seq_printf(s, "%s: %u-%u\n",
477 dev_name(kdev->dev), kdev->base_id,
478 kdev->base_id + kdev->num_queues - 1);
479 for_each_instance(idx, inst, kdev)
480 knav_queue_debug_show_instance(s, inst);
481 mutex_unlock(&knav_dev_lock);
482
483 return 0;
484}
485
486static int knav_queue_debug_open(struct inode *inode, struct file *file)
487{
488 return single_open(file, knav_queue_debug_show, NULL);
489}
490
491static const struct file_operations knav_queue_debug_ops = {
492 .open = knav_queue_debug_open,
493 .read = seq_read,
494 .llseek = seq_lseek,
495 .release = single_release,
496};
497
498static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
499 u32 flags)
500{
501 unsigned long end;
502 u32 val = 0;
503
504 end = jiffies + msecs_to_jiffies(timeout);
505 while (time_after(end, jiffies)) {
506 val = readl_relaxed(addr);
507 if (flags)
508 val &= flags;
509 if (!val)
510 break;
511 cpu_relax();
512 }
513 return val ? -ETIMEDOUT : 0;
514}
515
516
517static int knav_queue_flush(struct knav_queue *qh)
518{
519 struct knav_queue_inst *inst = qh->inst;
520 unsigned id = inst->id - inst->qmgr->start_queue;
521
522 atomic_set(&inst->desc_count, 0);
523 writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
524 return 0;
525}
540
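/**
 * knav_queue_open() - open a hardware queue
 * @name:	name to give the queue handle
 * @id:		queue number, or a KNAV_QUEUE_* type (QPEND/ACC/GP) to let
 *		the driver pick a free queue of that type
 * @flags:	KNAV_QUEUE_SHARED to allow the queue to be shared; queues
 *		are exclusive by default
 *
 * Returns a handle to the open hardware queue on success; use IS_ERR()
 * on the return value to check for errors.
 */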
541void *knav_queue_open(const char *name, unsigned id,
542 unsigned flags)
543{
544 struct knav_queue *qh = ERR_PTR(-EINVAL);
545
546 switch (id) {
547 case KNAV_QUEUE_QPEND:
548 case KNAV_QUEUE_ACC:
549 case KNAV_QUEUE_GP:
550 qh = knav_queue_open_by_type(name, id, flags);
551 break;
552
553 default:
554 qh = knav_queue_open_by_id(name, id, flags);
555 break;
556 }
557 return qh;
558}
559EXPORT_SYMBOL_GPL(knav_queue_open);
564
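/**
 * knav_queue_close() - close a hardware queue handle
 * @qhandle:	handle returned by knav_queue_open()
 *
 * Disables any notifier left enabled on the handle, removes it from the
 * instance and, when this was the last handle, lets the range driver
 * close the underlying queue.
 */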
565void knav_queue_close(void *qhandle)
566{
567 struct knav_queue *qh = qhandle;
568 struct knav_queue_inst *inst = qh->inst;
569
570 while (atomic_read(&qh->notifier_enabled) > 0)
571 knav_queue_disable_notifier(qh);
572
573 mutex_lock(&knav_dev_lock);
574 list_del_rcu(&qh->list);
575 mutex_unlock(&knav_dev_lock);
576 synchronize_rcu();
577 if (!knav_queue_is_busy(inst)) {
578 struct knav_range_info *range = inst->range;
579
580 if (range->ops && range->ops->close_queue)
581 range->ops->close_queue(range, inst);
582 }
583 free_percpu(qh->stats);
584 devm_kfree(inst->kdev->dev, qh);
585}
586EXPORT_SYMBOL_GPL(knav_queue_close);
595
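/**
 * knav_queue_device_control() - perform control operations on a queue
 * @qhandle:	handle returned by knav_queue_open()
 * @cmd:	control command (get id, flush, notifier setup, get count)
 * @arg:	command specific argument
 *
 * Returns the command result, or a negative error code.
 */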
596int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
597 unsigned long arg)
598{
599 struct knav_queue *qh = qhandle;
600 struct knav_queue_notify_config *cfg;
601 int ret;
602
603 switch ((int)cmd) {
604 case KNAV_QUEUE_GET_ID:
605 ret = qh->inst->kdev->base_id + qh->inst->id;
606 break;
607
608 case KNAV_QUEUE_FLUSH:
609 ret = knav_queue_flush(qh);
610 break;
611
612 case KNAV_QUEUE_SET_NOTIFIER:
613 cfg = (void *)arg;
614 ret = knav_queue_set_notifier(qh, cfg);
615 break;
616
617 case KNAV_QUEUE_ENABLE_NOTIFY:
618 ret = knav_queue_enable_notifier(qh);
619 break;
620
621 case KNAV_QUEUE_DISABLE_NOTIFY:
622 ret = knav_queue_disable_notifier(qh);
623 break;
624
625 case KNAV_QUEUE_GET_COUNT:
626 ret = knav_queue_get_count(qh);
627 break;
628
629 default:
630 ret = -ENOTSUPP;
631 break;
632 }
633 return ret;
634}
635EXPORT_SYMBOL_GPL(knav_queue_device_control);
647
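/**
 * knav_queue_push() - push a descriptor onto a hardware queue
 * @qhandle:	handle returned by knav_queue_open()
 * @dma:	DMA address of the descriptor
 * @size:	descriptor size in bytes (a multiple of 16)
 * @flags:	currently unused
 *
 * Returns 0 on success.
 */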
648int knav_queue_push(void *qhandle, dma_addr_t dma,
649 unsigned size, unsigned flags)
650{
651 struct knav_queue *qh = qhandle;
652 u32 val;
653
654 val = (u32)dma | ((size / 16) - 1);
655 writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
656
657 this_cpu_inc(qh->stats->pushes);
658 return 0;
659}
660EXPORT_SYMBOL_GPL(knav_queue_push);
668
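/**
 * knav_queue_pop() - pop a descriptor off a hardware queue
 * @qhandle:	handle returned by knav_queue_open()
 * @size:	optional output for the descriptor size in bytes
 *
 * Returns the DMA address of the descriptor, or 0 if the queue is empty.
 */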
669dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
670{
671 struct knav_queue *qh = qhandle;
672 struct knav_queue_inst *inst = qh->inst;
673 dma_addr_t dma;
674 u32 val, idx;
675
676
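	/* accumulator ranges pop from the per-instance descriptor ring */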
677 if (inst->descs) {
678 if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
679 atomic_inc(&inst->desc_count);
680 return 0;
681 }
682 idx = atomic_inc_return(&inst->desc_head);
683 idx &= ACC_DESCS_MASK;
684 val = inst->descs[idx];
685 } else {
686 val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
687 if (unlikely(!val))
688 return 0;
689 }
690
691 dma = val & DESC_PTR_MASK;
692 if (size)
693 *size = ((val & DESC_SIZE_MASK) + 1) * 16;
694
695 this_cpu_inc(qh->stats->pops);
696 return dma;
697}
698EXPORT_SYMBOL_GPL(knav_queue_pop);
699
700
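/* carve out descriptors from the region and push them onto the pool queue */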
701static void kdesc_fill_pool(struct knav_pool *pool)
702{
703 struct knav_region *region;
704 int i;
705
706 region = pool->region;
707 pool->desc_size = region->desc_size;
708 for (i = 0; i < pool->num_desc; i++) {
709 int index = pool->region_offset + i;
710 dma_addr_t dma_addr;
711 unsigned dma_size;
712 dma_addr = region->dma_start + (region->desc_size * index);
713 dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
714 dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
715 DMA_TO_DEVICE);
716 knav_queue_push(pool->queue, dma_addr, dma_size, 0);
717 }
718}
719
720
721static void kdesc_empty_pool(struct knav_pool *pool)
722{
723 dma_addr_t dma;
724 unsigned size;
725 void *desc;
726 int i;
727
728 if (!pool->queue)
729 return;
730
731 for (i = 0;; i++) {
732 dma = knav_queue_pop(pool->queue, &size);
733 if (!dma)
734 break;
735 desc = knav_pool_desc_dma_to_virt(pool, dma);
736 if (!desc) {
737 dev_dbg(pool->kdev->dev,
738 "couldn't unmap desc, continuing\n");
739 continue;
740 }
741 }
742 WARN_ON(i != pool->num_desc);
743 knav_queue_close(pool->queue);
744}
745
746
747
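/* translate between a pool descriptor's virtual and DMA addresses */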
748dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
749{
750 struct knav_pool *pool = ph;
751 return pool->region->dma_start + (virt - pool->region->virt_start);
752}
753EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
754
755void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
756{
757 struct knav_pool *pool = ph;
758 return pool->region->virt_start + (dma - pool->region->dma_start);
759}
760EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
771
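/**
 * knav_pool_create() - create a pool of descriptors
 * @name:	name to give the pool handle
 * @num_desc:	number of descriptors in the pool
 * @region_id:	QMSS region id from which the descriptors are allocated
 *
 * Returns a pool handle on success; use IS_ERR() to check for errors.
 */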
772void *knav_pool_create(const char *name,
773 int num_desc, int region_id)
774{
775 struct knav_region *reg_itr, *region = NULL;
776 struct knav_pool *pool, *pi;
777 struct list_head *node;
778 unsigned last_offset;
779 bool slot_found;
780 int ret;
781
782 if (!kdev)
783 return ERR_PTR(-EPROBE_DEFER);
784
785 if (!kdev->dev)
786 return ERR_PTR(-ENODEV);
787
788 pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
789 if (!pool) {
790 dev_err(kdev->dev, "out of memory allocating pool\n");
791 return ERR_PTR(-ENOMEM);
792 }
793
794 for_each_region(kdev, reg_itr) {
795 if (reg_itr->id != region_id)
796 continue;
797 region = reg_itr;
798 break;
799 }
800
801 if (!region) {
802 dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
803 ret = -EINVAL;
804 goto err;
805 }
806
807 pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
808 if (IS_ERR_OR_NULL(pool->queue)) {
809 dev_err(kdev->dev,
810 "failed to open queue for pool(%s), error %ld\n",
811 name, PTR_ERR(pool->queue));
812 ret = PTR_ERR(pool->queue);
813 goto err;
814 }
815
816 pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
817 pool->kdev = kdev;
818 pool->dev = kdev->dev;
819
820 mutex_lock(&knav_dev_lock);
821
822 if (num_desc > (region->num_desc - region->used_desc)) {
823 dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
824 region_id, name);
825 ret = -ENOMEM;
826 goto err_unlock;
827 }
828
	/*
	 * The region keeps its pools sorted by region offset; take the first
	 * free slot that is large enough for this request.
	 */
	last_offset = 0;
	slot_found = false;
	node = &region->pools;
	list_for_each_entry(pi, &region->pools, region_inst) {
		if ((pi->region_offset - last_offset) >= num_desc) {
			slot_found = true;
			break;
		}
		last_offset = pi->region_offset + pi->num_desc;
	}
	node = &pi->region_inst;
844
845 if (slot_found) {
846 pool->region = region;
847 pool->num_desc = num_desc;
848 pool->region_offset = last_offset;
849 region->used_desc += num_desc;
850 list_add_tail(&pool->list, &kdev->pools);
851 list_add_tail(&pool->region_inst, node);
852 } else {
853 dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
854 name, region_id);
855 ret = -ENOMEM;
856 goto err_unlock;
857 }
858
859 mutex_unlock(&knav_dev_lock);
860 kdesc_fill_pool(pool);
861 return pool;
862
863err_unlock:
864 mutex_unlock(&knav_dev_lock);
865err:
866 kfree(pool->name);
867 devm_kfree(kdev->dev, pool);
868 return ERR_PTR(ret);
869}
870EXPORT_SYMBOL_GPL(knav_pool_create);
875
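/**
 * knav_pool_destroy() - free a pool of descriptors
 * @ph:		pool handle returned by knav_pool_create()
 */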
876void knav_pool_destroy(void *ph)
877{
878 struct knav_pool *pool = ph;
879
880 if (!pool)
881 return;
882
883 if (!pool->region)
884 return;
885
886 kdesc_empty_pool(pool);
887 mutex_lock(&knav_dev_lock);
888
889 pool->region->used_desc -= pool->num_desc;
890 list_del(&pool->region_inst);
891 list_del(&pool->list);
892
893 mutex_unlock(&knav_dev_lock);
894 kfree(pool->name);
895 devm_kfree(kdev->dev, pool);
896}
897EXPORT_SYMBOL_GPL(knav_pool_destroy);
905
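/**
 * knav_pool_desc_get() - get a descriptor from the pool
 * @ph:		pool handle
 *
 * Returns a descriptor virtual address on success, or ERR_PTR(-ENOMEM)
 * when the pool is empty.
 */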
906void *knav_pool_desc_get(void *ph)
907{
908 struct knav_pool *pool = ph;
909 dma_addr_t dma;
910 unsigned size;
911 void *data;
912
913 dma = knav_queue_pop(pool->queue, &size);
914 if (unlikely(!dma))
915 return ERR_PTR(-ENOMEM);
916 data = knav_pool_desc_dma_to_virt(pool, dma);
917 return data;
918}
919EXPORT_SYMBOL_GPL(knav_pool_desc_get);
924
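/**
 * knav_pool_desc_put() - return a descriptor to the pool
 * @ph:		pool handle
 * @desc:	descriptor virtual address
 */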
925void knav_pool_desc_put(void *ph, void *desc)
926{
927 struct knav_pool *pool = ph;
928 dma_addr_t dma;
929 dma = knav_pool_desc_virt_to_dma(pool, desc);
930 knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
931}
932EXPORT_SYMBOL_GPL(knav_pool_desc_put);
943
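/**
 * knav_pool_desc_map() - sync a descriptor for a DMA transfer to the device
 * @ph:		pool handle
 * @desc:	descriptor virtual address
 * @size:	size of the descriptor contents to sync
 * @dma:	output DMA address of the descriptor
 * @dma_sz:	output size that was actually synced
 *
 * Returns 0 on success.
 */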
944int knav_pool_desc_map(void *ph, void *desc, unsigned size,
945 dma_addr_t *dma, unsigned *dma_sz)
946{
947 struct knav_pool *pool = ph;
948 *dma = knav_pool_desc_virt_to_dma(pool, desc);
949 size = min(size, pool->region->desc_size);
950 size = ALIGN(size, SMP_CACHE_BYTES);
951 *dma_sz = size;
952 dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
953
954
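	/* make sure the descriptor contents hit memory before any queue push */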
955 __iowmb();
956
957 return 0;
958}
959EXPORT_SYMBOL_GPL(knav_pool_desc_map);
969
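/**
 * knav_pool_desc_unmap() - sync a descriptor for the CPU after DMA
 * @ph:		pool handle
 * @dma:	DMA address of the descriptor
 * @dma_sz:	size of the descriptor to sync
 *
 * Returns the descriptor virtual address.
 */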
970void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
971{
972 struct knav_pool *pool = ph;
973 unsigned desc_sz;
974 void *desc;
975
976 desc_sz = min(dma_sz, pool->region->desc_size);
977 desc = knav_pool_desc_dma_to_virt(pool, dma);
978 dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
979 prefetch(desc);
980 return desc;
981}
982EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
988
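/**
 * knav_pool_count() - get the number of descriptors currently in the pool
 * @ph:		pool handle
 */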
989int knav_pool_count(void *ph)
990{
991 struct knav_pool *pool = ph;
992 return knav_queue_get_count(pool->queue);
993}
994EXPORT_SYMBOL_GPL(knav_pool_count);
995
996static void knav_queue_setup_region(struct knav_device *kdev,
997 struct knav_region *region)
998{
999 unsigned hw_num_desc, hw_desc_size, size;
1000 struct knav_reg_region __iomem *regs;
1001 struct knav_qmgr_info *qmgr;
1002 struct knav_pool *pool;
1003 int id = region->id;
1004 struct page *page;
1005
1006
1007 if (!region->num_desc) {
1008 dev_warn(kdev->dev, "unused region %s\n", region->name);
1009 return;
1010 }
1011
1012
1013 hw_num_desc = ilog2(region->num_desc - 1) + 1;
1014
1015
1016 if (region->num_desc < 32) {
1017 region->num_desc = 0;
1018 dev_warn(kdev->dev, "too few descriptors in region %s\n",
1019 region->name);
1020 return;
1021 }
1022
1023 size = region->num_desc * region->desc_size;
1024 region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
1025 GFP_DMA32);
1026 if (!region->virt_start) {
1027 region->num_desc = 0;
1028 dev_err(kdev->dev, "memory alloc failed for region %s\n",
1029 region->name);
1030 return;
1031 }
1032 region->virt_end = region->virt_start + size;
1033 page = virt_to_page(region->virt_start);
1034
1035 region->dma_start = dma_map_page(kdev->dev, page, 0, size,
1036 DMA_BIDIRECTIONAL);
1037 if (dma_mapping_error(kdev->dev, region->dma_start)) {
1038 dev_err(kdev->dev, "dma map failed for region %s\n",
1039 region->name);
1040 goto fail;
1041 }
1042 region->dma_end = region->dma_start + size;
1043
1044 pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1045 if (!pool) {
1046 dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1047 goto fail;
1048 }
1049 pool->num_desc = 0;
1050 pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);
1052
1053 dev_dbg(kdev->dev,
1054 "region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
1055 region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
1057 region->virt_start, region->virt_end);
1058
1059 hw_desc_size = (region->desc_size / 16) - 1;
1060 hw_num_desc -= 5;
1061
1062 for_each_qmgr(kdev, qmgr) {
1063 regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
1068 }
1069 return;
1070
1071fail:
1072 if (region->dma_start)
1073 dma_unmap_page(kdev->dev, region->dma_start, size,
1074 DMA_BIDIRECTIONAL);
1075 if (region->virt_start)
1076 free_pages_exact(region->virt_start, size);
1077 region->num_desc = 0;
1078 return;
1079}
1080
1081static const char *knav_queue_find_name(struct device_node *node)
1082{
1083 const char *name;
1084
1085 if (of_property_read_string(node, "label", &name) < 0)
1086 name = node->name;
1087 if (!name)
1088 name = "unknown";
1089 return name;
1090}
1091
1092static int knav_queue_setup_regions(struct knav_device *kdev,
1093 struct device_node *regions)
1094{
1095 struct device *dev = kdev->dev;
1096 struct knav_region *region;
1097 struct device_node *child;
1098 u32 temp[2];
1099 int ret;
1100
1101 for_each_child_of_node(regions, child) {
1102 region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
1103 if (!region) {
1104 dev_err(dev, "out of memory allocating region\n");
1105 return -ENOMEM;
1106 }
1107
1108 region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
1110 ret = of_property_read_u32_array(child, "region-spec", temp, 2);
1111 if (!ret) {
1112 region->num_desc = temp[0];
1113 region->desc_size = temp[1];
1114 } else {
1115 dev_err(dev, "invalid region info %s\n", region->name);
1116 devm_kfree(dev, region);
1117 continue;
1118 }
1119
1120 if (!of_get_property(child, "link-index", NULL)) {
1121 dev_err(dev, "No link info for %s\n", region->name);
1122 devm_kfree(dev, region);
1123 continue;
1124 }
1125 ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
1127 if (ret) {
1128 dev_err(dev, "link index not found for %s\n",
1129 region->name);
1130 devm_kfree(dev, region);
1131 continue;
1132 }
1133
		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
1136 }
1137 if (list_empty(&kdev->regions)) {
1138 dev_err(dev, "no valid region information found\n");
1139 return -ENODEV;
1140 }
1141
1142
1143 for_each_region(kdev, region)
1144 knav_queue_setup_region(kdev, region);
1145
1146 return 0;
1147}
1148
1149static int knav_get_link_ram(struct knav_device *kdev,
1150 const char *name,
1151 struct knav_link_ram_block *block)
1152{
1153 struct platform_device *pdev = to_platform_device(kdev->dev);
1154 struct device_node *node = pdev->dev.of_node;
1155 u32 temp[2];
	/*
	 * The link RAM property has two cells: the RAM address (zero asks
	 * the driver to allocate the block) and the size in link entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * A fixed address was supplied (typically the
			 * internal link RAM); record it without allocating
			 * anything.
			 */
1176 block->dma = (dma_addr_t)temp[0];
1177 block->virt = NULL;
1178 block->size = temp[1];
1179 } else {
1180 block->size = temp[1];
1181
1182 block->virt = dmam_alloc_coherent(kdev->dev,
1183 8 * block->size, &block->dma,
1184 GFP_KERNEL);
1185 if (!block->virt) {
1186 dev_err(kdev->dev, "failed to alloc linkram\n");
1187 return -ENOMEM;
1188 }
1189 }
1190 } else {
1191 return -ENODEV;
1192 }
1193 return 0;
1194}
1195
1196static int knav_queue_setup_link_ram(struct knav_device *kdev)
1197{
1198 struct knav_link_ram_block *block;
1199 struct knav_qmgr_info *qmgr;
1200
1201 for_each_qmgr(kdev, qmgr) {
1202 block = &kdev->link_rams[0];
1203 dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
1204 &block->dma, block->virt, block->size);
1205 writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
1206 if (kdev->version == QMSS_66AK2G)
1207 writel_relaxed(block->size,
1208 &qmgr->reg_config->link_ram_size0);
1209 else
1210 writel_relaxed(block->size - 1,
1211 &qmgr->reg_config->link_ram_size0);
1212 block++;
1213 if (!block->size)
1214 continue;
1215
1216 dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
1217 &block->dma, block->virt, block->size);
1218 writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
1219 }
1220
1221 return 0;
1222}
1223
1224static int knav_setup_queue_range(struct knav_device *kdev,
1225 struct device_node *node)
1226{
1227 struct device *dev = kdev->dev;
1228 struct knav_range_info *range;
1229 struct knav_qmgr_info *qmgr;
1230 u32 temp[2], start, end, id, index;
1231 int ret, i;
1232
1233 range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
1234 if (!range) {
1235 dev_err(dev, "out of memory allocating range\n");
1236 return -ENOMEM;
1237 }
1238
1239 range->kdev = kdev;
1240 range->name = knav_queue_find_name(node);
1241 ret = of_property_read_u32_array(node, "qrange", temp, 2);
1242 if (!ret) {
1243 range->queue_base = temp[0] - kdev->base_id;
1244 range->num_queues = temp[1];
1245 } else {
1246 dev_err(dev, "invalid queue range %s\n", range->name);
1247 devm_kfree(dev, range);
1248 return -EINVAL;
1249 }
1250
1251 for (i = 0; i < RANGE_MAX_IRQS; i++) {
1252 struct of_phandle_args oirq;
1253
1254 if (of_irq_parse_one(node, i, &oirq))
1255 break;
1256
1257 range->irqs[i].irq = irq_create_of_mapping(&oirq);
1258 if (range->irqs[i].irq == IRQ_NONE)
1259 break;
1260
1261 range->num_irqs++;
1262
1263 if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
1264 unsigned long mask;
1265 int bit;
1266
1267 range->irqs[i].cpu_mask = devm_kzalloc(dev,
1268 cpumask_size(), GFP_KERNEL);
1269 if (!range->irqs[i].cpu_mask)
1270 return -ENOMEM;
1271
1272 mask = (oirq.args[2] & 0x0000ff00) >> 8;
1273 for_each_set_bit(bit, &mask, BITS_PER_LONG)
1274 cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
1275 }
1276 }
1277
1278 range->num_irqs = min(range->num_irqs, range->num_queues);
1279 if (range->num_irqs)
1280 range->flags |= RANGE_HAS_IRQ;
1281
1282 if (of_get_property(node, "qalloc-by-id", NULL))
1283 range->flags |= RANGE_RESERVED;
1284
1285 if (of_get_property(node, "accumulator", NULL)) {
1286 ret = knav_init_acc_range(kdev, node, range);
1287 if (ret < 0) {
1288 devm_kfree(dev, range);
1289 return ret;
1290 }
1291 } else {
1292 range->ops = &knav_gp_range_ops;
1293 }
1294
1295
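	/* set the peek threshold to one entry and flush out the queues */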
1296 for_each_qmgr(kdev, qmgr) {
1297 start = max(qmgr->start_queue, range->queue_base);
1298 end = min(qmgr->start_queue + qmgr->num_queues,
1299 range->queue_base + range->num_queues);
1300 for (id = start; id < end; id++) {
1301 index = id - qmgr->start_queue;
1302 writel_relaxed(THRESH_GTE | 1,
1303 &qmgr->reg_peek[index].ptr_size_thresh);
1304 writel_relaxed(0,
1305 &qmgr->reg_push[index].ptr_size_thresh);
1306 }
1307 }
1308
1309 list_add_tail(&range->list, &kdev->queue_ranges);
1310 dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
1311 range->name, range->queue_base,
1312 range->queue_base + range->num_queues - 1,
1313 range->num_irqs,
1314 (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
1315 (range->flags & RANGE_RESERVED) ? ", reserved" : "",
1316 (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
1317 kdev->num_queues_in_use += range->num_queues;
1318 return 0;
1319}
1320
1321static int knav_setup_queue_pools(struct knav_device *kdev,
1322 struct device_node *queue_pools)
1323{
1324 struct device_node *type, *range;
1325 int ret;
1326
1327 for_each_child_of_node(queue_pools, type) {
1328 for_each_child_of_node(type, range) {
1329 ret = knav_setup_queue_range(kdev, range);
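			/* return value ignored; set up as many ranges as possible */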
1330
1331 }
1332 }
1333
1334
1335 if (list_empty(&kdev->queue_ranges)) {
1336 dev_err(kdev->dev, "no valid queue range found\n");
1337 return -ENODEV;
1338 }
1339 return 0;
1340}
1341
1342static void knav_free_queue_range(struct knav_device *kdev,
1343 struct knav_range_info *range)
1344{
1345 if (range->ops && range->ops->free_range)
1346 range->ops->free_range(range);
1347 list_del(&range->list);
1348 devm_kfree(kdev->dev, range);
1349}
1350
1351static void knav_free_queue_ranges(struct knav_device *kdev)
1352{
1353 struct knav_range_info *range;
1354
1355 for (;;) {
1356 range = first_queue_range(kdev);
1357 if (!range)
1358 break;
1359 knav_free_queue_range(kdev, range);
1360 }
1361}
1362
1363static void knav_queue_free_regions(struct knav_device *kdev)
1364{
1365 struct knav_region *region;
1366 struct knav_pool *pool, *tmp;
1367 unsigned size;
1368
1369 for (;;) {
1370 region = first_region(kdev);
1371 if (!region)
1372 break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1374 knav_pool_destroy(pool);
1375
1376 size = region->virt_end - region->virt_start;
1377 if (size)
1378 free_pages_exact(region->virt_start, size);
		list_del(&region->list);
1380 devm_kfree(kdev->dev, region);
1381 }
1382}
1383
1384static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1385 struct device_node *node, int index)
1386{
1387 struct resource res;
1388 void __iomem *regs;
1389 int ret;
1390
1391 ret = of_address_to_resource(node, index, &res);
1392 if (ret) {
1393 dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
1394 node, index);
1395 return ERR_PTR(ret);
1396 }
1397
1398 regs = devm_ioremap_resource(kdev->dev, &res);
1399 if (IS_ERR(regs))
1400 dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
1401 index, node);
1402 return regs;
1403}
1404
1405static int knav_queue_init_qmgrs(struct knav_device *kdev,
1406 struct device_node *qmgrs)
1407{
1408 struct device *dev = kdev->dev;
1409 struct knav_qmgr_info *qmgr;
1410 struct device_node *child;
1411 u32 temp[2];
1412 int ret;
1413
1414 for_each_child_of_node(qmgrs, child) {
1415 qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1416 if (!qmgr) {
1417 dev_err(dev, "out of memory allocating qmgr\n");
1418 return -ENOMEM;
1419 }
1420
1421 ret = of_property_read_u32_array(child, "managed-queues",
1422 temp, 2);
1423 if (!ret) {
1424 qmgr->start_queue = temp[0];
1425 qmgr->num_queues = temp[1];
1426 } else {
1427 dev_err(dev, "invalid qmgr queue range\n");
1428 devm_kfree(dev, qmgr);
1429 continue;
1430 }
1431
1432 dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1433 qmgr->start_queue, qmgr->num_queues);
1434
1435 qmgr->reg_peek =
1436 knav_queue_map_reg(kdev, child,
1437 KNAV_QUEUE_PEEK_REG_INDEX);
1438
1439 if (kdev->version == QMSS) {
1440 qmgr->reg_status =
1441 knav_queue_map_reg(kdev, child,
1442 KNAV_QUEUE_STATUS_REG_INDEX);
1443 }
1444
1445 qmgr->reg_config =
1446 knav_queue_map_reg(kdev, child,
1447 (kdev->version == QMSS_66AK2G) ?
1448 KNAV_L_QUEUE_CONFIG_REG_INDEX :
1449 KNAV_QUEUE_CONFIG_REG_INDEX);
1450 qmgr->reg_region =
1451 knav_queue_map_reg(kdev, child,
1452 (kdev->version == QMSS_66AK2G) ?
1453 KNAV_L_QUEUE_REGION_REG_INDEX :
1454 KNAV_QUEUE_REGION_REG_INDEX);
1455
1456 qmgr->reg_push =
1457 knav_queue_map_reg(kdev, child,
1458 (kdev->version == QMSS_66AK2G) ?
1459 KNAV_L_QUEUE_PUSH_REG_INDEX :
1460 KNAV_QUEUE_PUSH_REG_INDEX);
1461
1462 if (kdev->version == QMSS) {
1463 qmgr->reg_pop =
1464 knav_queue_map_reg(kdev, child,
1465 KNAV_QUEUE_POP_REG_INDEX);
1466 }
1467
1468 if (IS_ERR(qmgr->reg_peek) ||
1469 ((kdev->version == QMSS) &&
1470 (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
1471 IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1472 IS_ERR(qmgr->reg_push)) {
1473 dev_err(dev, "failed to map qmgr regs\n");
1474 if (kdev->version == QMSS) {
1475 if (!IS_ERR(qmgr->reg_status))
1476 devm_iounmap(dev, qmgr->reg_status);
1477 if (!IS_ERR(qmgr->reg_pop))
1478 devm_iounmap(dev, qmgr->reg_pop);
1479 }
1480 if (!IS_ERR(qmgr->reg_peek))
1481 devm_iounmap(dev, qmgr->reg_peek);
1482 if (!IS_ERR(qmgr->reg_config))
1483 devm_iounmap(dev, qmgr->reg_config);
1484 if (!IS_ERR(qmgr->reg_region))
1485 devm_iounmap(dev, qmgr->reg_region);
1486 if (!IS_ERR(qmgr->reg_push))
1487 devm_iounmap(dev, qmgr->reg_push);
1488 devm_kfree(dev, qmgr);
1489 continue;
1490 }
1491
1492
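		/* 66AK2G has no separate pop registers; pop shares the push space */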
1493 if (kdev->version == QMSS_66AK2G)
1494 qmgr->reg_pop = qmgr->reg_push;
1495
1496 list_add_tail(&qmgr->list, &kdev->qmgrs);
1497 dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1498 qmgr->start_queue, qmgr->num_queues,
1499 qmgr->reg_peek, qmgr->reg_status,
1500 qmgr->reg_config, qmgr->reg_region,
1501 qmgr->reg_push, qmgr->reg_pop);
1502 }
1503 return 0;
1504}
1505
1506static int knav_queue_init_pdsps(struct knav_device *kdev,
1507 struct device_node *pdsps)
1508{
1509 struct device *dev = kdev->dev;
1510 struct knav_pdsp_info *pdsp;
1511 struct device_node *child;
1512
1513 for_each_child_of_node(pdsps, child) {
1514 pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1515 if (!pdsp) {
1516 dev_err(dev, "out of memory allocating pdsp\n");
1517 return -ENOMEM;
1518 }
1519 pdsp->name = knav_queue_find_name(child);
1520 pdsp->iram =
1521 knav_queue_map_reg(kdev, child,
1522 KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1523 pdsp->regs =
1524 knav_queue_map_reg(kdev, child,
1525 KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1526 pdsp->intd =
1527 knav_queue_map_reg(kdev, child,
1528 KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1529 pdsp->command =
1530 knav_queue_map_reg(kdev, child,
1531 KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1532
1533 if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1534 IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1535 dev_err(dev, "failed to map pdsp %s regs\n",
1536 pdsp->name);
1537 if (!IS_ERR(pdsp->command))
1538 devm_iounmap(dev, pdsp->command);
1539 if (!IS_ERR(pdsp->iram))
1540 devm_iounmap(dev, pdsp->iram);
1541 if (!IS_ERR(pdsp->regs))
1542 devm_iounmap(dev, pdsp->regs);
1543 if (!IS_ERR(pdsp->intd))
1544 devm_iounmap(dev, pdsp->intd);
1545 devm_kfree(dev, pdsp);
1546 continue;
1547 }
1548 of_property_read_u32(child, "id", &pdsp->id);
1549 list_add_tail(&pdsp->list, &kdev->pdsps);
1550 dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1551 pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1552 pdsp->intd);
1553 }
1554 return 0;
1555}
1556
1557static int knav_queue_stop_pdsp(struct knav_device *kdev,
1558 struct knav_pdsp_info *pdsp)
1559{
1560 u32 val, timeout = 1000;
1561 int ret;
1562
1563 val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1564 writel_relaxed(val, &pdsp->regs->control);
1565 ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1566 PDSP_CTRL_RUNNING);
1567 if (ret < 0) {
1568 dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1569 return ret;
1570 }
1571 pdsp->loaded = false;
1572 pdsp->started = false;
1573 return 0;
1574}
1575
1576static int knav_queue_load_pdsp(struct knav_device *kdev,
1577 struct knav_pdsp_info *pdsp)
1578{
1579 int i, ret, fwlen;
1580 const struct firmware *fw;
1581 bool found = false;
1582 u32 *fwdata;
1583
1584 for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1585 if (knav_acc_firmwares[i]) {
1586 ret = request_firmware_direct(&fw,
1587 knav_acc_firmwares[i],
1588 kdev->dev);
1589 if (!ret) {
1590 found = true;
1591 break;
1592 }
1593 }
1594 }
1595
1596 if (!found) {
1597 dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1598 return -ENODEV;
1599 }
1600
1601 dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1602 knav_acc_firmwares[i]);
1603
1604 writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1605
1606 fwdata = (u32 *)fw->data;
1607 fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1608 for (i = 0; i < fwlen; i++)
1609 writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1610
1611 release_firmware(fw);
1612 return 0;
1613}
1614
1615static int knav_queue_start_pdsp(struct knav_device *kdev,
1616 struct knav_pdsp_info *pdsp)
1617{
1618 u32 val, timeout = 1000;
1619 int ret;
1620
1621
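	/* write a dummy command and wait until it reads back */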
1622 writel_relaxed(0xffffffff, pdsp->command);
1623 while (readl_relaxed(pdsp->command) != 0xffffffff)
1624 cpu_relax();
1625
1626
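	/* take the PDSP out of soft reset with the program counter cleared */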
1627 val = readl_relaxed(&pdsp->regs->control);
1628 val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1629 writel_relaxed(val, &pdsp->regs->control);
1630
1631
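	/* enable the PDSP */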
1632 val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1633 writel_relaxed(val, &pdsp->regs->control);
1634
1635
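	/* wait for the firmware to clear the command register */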
1636 ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1637 if (ret < 0) {
1638 dev_err(kdev->dev,
1639 "timed out on pdsp %s command register wait\n",
1640 pdsp->name);
1641 return ret;
1642 }
1643 return 0;
1644}
1645
1646static void knav_queue_stop_pdsps(struct knav_device *kdev)
1647{
1648 struct knav_pdsp_info *pdsp;
1649
1650
1651 for_each_pdsp(kdev, pdsp)
1652 knav_queue_stop_pdsp(kdev, pdsp);
1653}
1654
1655static int knav_queue_start_pdsps(struct knav_device *kdev)
1656{
1657 struct knav_pdsp_info *pdsp;
1658 int ret;
1659
1660 knav_queue_stop_pdsps(kdev);
1666
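	/*
	 * Load firmware on every PDSP. A load failure is not fatal here:
	 * accumulator firmware is optional, so only the PDSPs that did load
	 * are started below.
	 */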
1667 for_each_pdsp(kdev, pdsp) {
1668 ret = knav_queue_load_pdsp(kdev, pdsp);
1669 if (!ret)
1670 pdsp->loaded = true;
1671 }
1672
1673 for_each_pdsp(kdev, pdsp) {
1674 if (pdsp->loaded) {
1675 ret = knav_queue_start_pdsp(kdev, pdsp);
1676 if (!ret)
1677 pdsp->started = true;
1678 }
1679 }
1680 return 0;
1681}
1682
1683static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1684{
1685 struct knav_qmgr_info *qmgr;
1686
1687 for_each_qmgr(kdev, qmgr) {
1688 if ((id >= qmgr->start_queue) &&
1689 (id < qmgr->start_queue + qmgr->num_queues))
1690 return qmgr;
1691 }
1692 return NULL;
1693}
1694
1695static int knav_queue_init_queue(struct knav_device *kdev,
1696 struct knav_range_info *range,
1697 struct knav_queue_inst *inst,
1698 unsigned id)
1699{
1700 char irq_name[KNAV_NAME_SIZE];
1701 inst->qmgr = knav_find_qmgr(id);
1702 if (!inst->qmgr)
1703 return -1;
1704
1705 INIT_LIST_HEAD(&inst->handles);
1706 inst->kdev = kdev;
1707 inst->range = range;
1708 inst->irq_num = -1;
1709 inst->id = id;
1710 scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1711 inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1712
1713 if (range->ops && range->ops->init_queue)
1714 return range->ops->init_queue(range, inst);
1715 else
1716 return 0;
1717}
1718
1719static int knav_queue_init_queues(struct knav_device *kdev)
1720{
1721 struct knav_range_info *range;
1722 int size, id, base_idx;
1723 int idx = 0, ret = 0;
1724
1725
1726 size = sizeof(struct knav_queue_inst);
1727
1728
1729
1730
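	/*
	 * Round the instance size up to a power of two so that the index to
	 * instance lookup can use a simple shift.
	 */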
1731 kdev->inst_shift = order_base_2(size);
1732 size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1733 kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1734 if (!kdev->instances)
1735 return -ENOMEM;
1736
1737 for_each_queue_range(kdev, range) {
1738 if (range->ops && range->ops->init_range)
1739 range->ops->init_range(range);
1740 base_idx = idx;
1741 for (id = range->queue_base;
1742 id < range->queue_base + range->num_queues; id++, idx++) {
1743 ret = knav_queue_init_queue(kdev, range,
1744 knav_queue_idx_to_inst(kdev, idx), id);
1745 if (ret < 0)
1746 return ret;
1747 }
1748 range->queue_base_inst =
1749 knav_queue_idx_to_inst(kdev, base_idx);
1750 }
1751 return 0;
1752}
1753
1754
1755static const struct of_device_id keystone_qmss_of_match[] = {
1756 {
1757 .compatible = "ti,keystone-navigator-qmss",
1758 },
1759 {
1760 .compatible = "ti,66ak2g-navss-qm",
1761 .data = (void *)QMSS_66AK2G,
1762 },
1763 {},
1764};
1765MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1766
1767static int knav_queue_probe(struct platform_device *pdev)
1768{
1769 struct device_node *node = pdev->dev.of_node;
1770 struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
1771 const struct of_device_id *match;
1772 struct device *dev = &pdev->dev;
1773 u32 temp[2];
1774 int ret;
1775
1776 if (!node) {
1777 dev_err(dev, "device tree info unavailable\n");
1778 return -ENODEV;
1779 }
1780
1781 kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1782 if (!kdev) {
1783 dev_err(dev, "memory allocation failed\n");
1784 return -ENOMEM;
1785 }
1786
1787 match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
1788 if (match && match->data)
1789 kdev->version = QMSS_66AK2G;
1790
1791 platform_set_drvdata(pdev, kdev);
1792 kdev->dev = dev;
1793 INIT_LIST_HEAD(&kdev->queue_ranges);
1794 INIT_LIST_HEAD(&kdev->qmgrs);
1795 INIT_LIST_HEAD(&kdev->pools);
1796 INIT_LIST_HEAD(&kdev->regions);
1797 INIT_LIST_HEAD(&kdev->pdsps);
1798
1799 pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* balance the usage count bumped by pm_runtime_get_sync() */
		pm_runtime_put_noidle(&pdev->dev);
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}
1805
1806 if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1807 dev_err(dev, "queue-range not specified\n");
1808 ret = -ENODEV;
1809 goto err;
1810 }
1811 kdev->base_id = temp[0];
1812 kdev->num_queues = temp[1];
1813
1814
1815 qmgrs = of_get_child_by_name(node, "qmgrs");
1816 if (!qmgrs) {
1817 dev_err(dev, "queue manager info not specified\n");
1818 ret = -ENODEV;
1819 goto err;
1820 }
1821 ret = knav_queue_init_qmgrs(kdev, qmgrs);
1822 of_node_put(qmgrs);
1823 if (ret)
1824 goto err;
1825
1826
1827 pdsps = of_get_child_by_name(node, "pdsps");
1828 if (pdsps) {
1829 ret = knav_queue_init_pdsps(kdev, pdsps);
1830 if (ret)
1831 goto err;
1832
1833 ret = knav_queue_start_pdsps(kdev);
1834 if (ret)
1835 goto err;
1836 }
1837 of_node_put(pdsps);
1838
1839
1840 queue_pools = of_get_child_by_name(node, "queue-pools");
1841 if (!queue_pools) {
1842 dev_err(dev, "queue-pools not specified\n");
1843 ret = -ENODEV;
1844 goto err;
1845 }
1846 ret = knav_setup_queue_pools(kdev, queue_pools);
1847 of_node_put(queue_pools);
1848 if (ret)
1849 goto err;
1850
1851 ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1852 if (ret) {
1853 dev_err(kdev->dev, "could not setup linking ram\n");
1854 goto err;
1855 }
1856
1857 ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1858 if (ret) {
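		/* the second link RAM is optional; carry on without it */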
1859
1860
1861
1862
1863 }
1864
1865 ret = knav_queue_setup_link_ram(kdev);
1866 if (ret)
1867 goto err;
1868
1869 regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
1874 ret = knav_queue_setup_regions(kdev, regions);
1875 of_node_put(regions);
1876 if (ret)
1877 goto err;
1878
1879 ret = knav_queue_init_queues(kdev);
1880 if (ret < 0) {
1881 dev_err(dev, "hwqueue initialization failed\n");
1882 goto err;
1883 }
1884
1885 debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1886 &knav_queue_debug_ops);
1887 device_ready = true;
1888 return 0;
1889
1890err:
1891 knav_queue_stop_pdsps(kdev);
1892 knav_queue_free_regions(kdev);
1893 knav_free_queue_ranges(kdev);
1894 pm_runtime_put_sync(&pdev->dev);
1895 pm_runtime_disable(&pdev->dev);
1896 return ret;
1897}
1898
1899static int knav_queue_remove(struct platform_device *pdev)
1900{
1901
1902 pm_runtime_put_sync(&pdev->dev);
1903 pm_runtime_disable(&pdev->dev);
1904 return 0;
1905}
1906
1907static struct platform_driver keystone_qmss_driver = {
1908 .probe = knav_queue_probe,
1909 .remove = knav_queue_remove,
1910 .driver = {
1911 .name = "keystone-navigator-qmss",
1912 .of_match_table = keystone_qmss_of_match,
1913 },
1914};
1915module_platform_driver(keystone_qmss_driver);
1916
1917MODULE_LICENSE("GPL v2");
1918MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1919MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1920MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
1921