#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

struct xive_irq_bitmap {
	unsigned long		*bitmap;
	unsigned int		base;
	unsigned int		count;
	spinlock_t		lock;
	struct list_head	list;
};

static LIST_HEAD(xive_irq_bitmaps);
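
/*
 * Hardware interrupt numbers (LISNs) are handed out from the ranges
 * advertised by the hypervisor in the "ibm,xive-lisn-ranges" property.
 * Each range is tracked by a xive_irq_bitmap and allocations are made
 * bit by bit under the per-range spinlock.
 */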

static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	/*
	 * A bitmap of 'count' bits only needs count/8 bytes, so
	 * allocating 'count' bytes is more than enough.
	 */
	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}

static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}
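/*
 * hcall retry helpers: PAPR hcalls can return H_BUSY or one of the
 * H_LONG_BUSY_* codes, in which case the caller is expected to delay
 * for the hinted time and retry. Every H_INT_* call below is therefore
 * wrapped in a do/while (plpar_busy_delay(rc)) loop.
 */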
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc)) {
		ms = get_longbusy_msecs(rc);
	} else if (rc == H_BUSY) {
		ms = 10;
	}

	return ms;
}

static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}

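/*
 * Note: H_INT_RESET has a partition-wide scope and may take a while to
 * complete, hence the busy-delay retry loop.
 */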
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}

static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

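/*
 * The flag definitions below use the PAPR-style MSB 0 bit numbering:
 * "1ull << (63 - n)" selects bit n counted from the most significant
 * bit of the 64-bit flags word.
 */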
#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63))

static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_get_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long *target,
					unsigned long *prio,
					unsigned long *sw_irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
				 target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
		       lisn, rc);
		return rc;
	}

	*target = retbuf[0];
	*prio = retbuf[1];
	*sw_irq = retbuf[2];

	pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2]);

	return 0;
}

static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}

#define XIVE_EQ_ALWAYS_NOTIFY	(1ull << (63 - 63))

static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

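/*
 * H_INT_ESB performs an ESB load or store on behalf of the guest. It is
 * used when the source is flagged XIVE_SRC_H_INT_ESB, i.e. when the ESB
 * pages cannot be accessed through MMIO.
 */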
#define XIVE_ESB_FLAG_STORE	(1ull << (63 - 63))

static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}

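/*
 * Source flags returned by H_INT_GET_SOURCE_INFO, again in PAPR-style
 * MSB 0 bit numbering.
 */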
#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))

static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	data->hw_irq = hw_irq;

	/*
	 * There is no chip-id on the sPAPR backend. This has an impact
	 * on how a target is picked, see xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	/*
	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall must be
	 * used for interrupt management: the ESB pages are not
	 * available, so don't try to map them.
	 */
	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
		return 0;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	/* A full function page also supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}

static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				     u32 *sw_irq)
{
	long rc;
	unsigned long h_target;
	unsigned long h_prio;
	unsigned long h_sw_irq;

	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
					 &h_sw_irq);

	*target = h_target;
	*prio = h_prio;
	*sw_irq = h_sw_irq;

	return rc == 0 ? 0 : -ENXIO;
}

static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there is a real queue page, use its physical address */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/*
	 * Initialize the rest of the fields. Queue entries are 4 bytes,
	 * so a queue of 2^order bytes holds 2^(order - 2) entries.
	 */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* The ESN page reported by the hypervisor is the queue EOI page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
	}
fail:
	return rc;
}

static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}

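/*
 * There is no dedicated IPI source on pSeries: IPIs are ordinary LISNs
 * allocated from the same "ibm,xive-lisn-ranges" bitmaps as device
 * interrupts.
 */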
#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */

static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}

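/*
 * Perform an "Acknowledge O/S to Register" cycle and update the
 * struct xive_cpu view of the pending priorities and the CPPR.
 */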
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle. Use the raw
	 * I/O accessor: the synchronisation done by the higher level
	 * accessors is not needed here.
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which tells us the type of
	 * interrupt that was acknowledged.
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;

		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}

static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used: EOIs are done through the ESB pages or H_INT_ESB */
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug output on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Let the hypervisor synchronize in-flight events for this source */
	plpar_int_sync(0, hw_irq);
}

static int xive_spapr_debug_show(struct seq_file *m, void *private)
{
	struct xive_irq_bitmap *xibm;
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		memset(buf, 0, PAGE_SIZE);
		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
		seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
	}
	kfree(buf);

	return 0;
}

static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.get_irq_config		= xive_spapr_get_irq_config,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.eoi			= xive_spapr_eoi,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
	.debug_show		= xive_spapr_debug_show,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};
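/*
 * Find the highest priority that is not reserved by the platform. The
 * "ibm,plat-res-int-priorities" property of the root node is a list of
 * <base, range> cell pairs describing the reserved priority values.
 */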
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/*
	 * HW supports priorities in the range [0-7]. Pick the largest
	 * one that is not reserved by the platform.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}

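/*
 * "ibm,architecture-vec-5" under /chosen records the option vector 5
 * bytes negotiated with the hypervisor at CAS time. Fetch one of those
 * bytes from the flattened device tree.
 */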
static const u8 *get_vec5_feature(unsigned int index)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return NULL;

	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5)
		return NULL;

	if (size <= index)
		return NULL;

	return vec5 + index;
}

static bool xive_spapr_disabled(void)
{
	const u8 *vec5_xive;

	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
	if (vec5_xive) {
		u8 val;

		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
		switch (val) {
		case OV5_FEAT(OV5_XIVE_EITHER):
		case OV5_FEAT(OV5_XIVE_LEGACY):
			break;
		case OV5_FEAT(OV5_XIVE_EXPLOIT):
			/* Hypervisor only supports XIVE exploitation mode */
			if (xive_cmdline_disabled)
				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
			return false;
		default:
			pr_warn("%s: Unknown xive support option: 0x%x\n",
				__func__, val);
			break;
		}
	}

	return xive_cmdline_disabled;
}

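/*
 * Probe and set up the sPAPR XIVE backend: find the "ibm,power-ivpe"
 * node, map the OS TIMA, register the LISN ranges, pick an event queue
 * size and hand everything over to the XIVE core.
 */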
bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_spapr_disabled())
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found!\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

machine_arch_initcall(pseries, xive_core_debug_init);