// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

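/*
 * Bitmaps of hardware interrupt numbers, one per "ibm,xive-lisn-ranges"
 * entry in the device tree, from which IPI numbers are allocated.
 */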
struct xive_irq_bitmap {
	unsigned long *bitmap;
	unsigned int base;
	unsigned int count;
	spinlock_t lock;
	struct list_head list;
};

static LIST_HEAD(xive_irq_bitmaps);

static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}

static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}
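/*
 * Busy handling for the XIVE hcalls: convert H_BUSY/H_LONG_BUSY_*
 * return codes into a retry delay in milliseconds.
 */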
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc))
		ms = get_longbusy_msecs(rc);
	else if (rc == H_BUSY)
		ms = 10;

	return ms;
}

static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}
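/*
 * H_INT_RESET has partition-wide scope and may report busy for a
 * while; it is retried until the hypervisor completes it.
 */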
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}

static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63))

static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_get_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long *target,
					unsigned long *prio,
					unsigned long *sw_irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
				 target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
		       lisn, rc);
		return rc;
	}

	*target = retbuf[0];
	*prio = retbuf[1];
	*sw_irq = retbuf[2];

	pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2]);

	return 0;
}

static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}

#define XIVE_EQ_ALWAYS_NOTIFY	(1ull << (63 - 63))

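/* Configure (or, with qpage == 0, reset) the event queue of a target/priority pair */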
static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

#define XIVE_ESB_FLAG_STORE	(1ull << (63 - 63))

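/* ESB MMIO load/store cycles performed on our behalf by the hypervisor */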
static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}

#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))

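/* Query the hypervisor for a source's characteristics and map its ESB pages */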
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	/*
	 * There is no chip id for the sPAPR backend. This has an
	 * impact on how the XIVE core picks a target for the interrupt.
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	/* A "full function" page serves both EOI and trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}

static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				     u32 *sw_irq)
{
	long rc;
	unsigned long h_target;
	unsigned long h_prio;
	unsigned long h_sw_irq;

	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
					 &h_sw_irq);

	*target = h_target;
	*prio = h_prio;
	*sw_irq = h_sw_irq;

	return rc == 0 ? 0 : -ENXIO;
}

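/* Initialize the queue descriptor and configure the queue page with the hypervisor */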
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, get its physical address */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the queue fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* Remember the ESB notification page of the queue */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
		/* Secure guests must share the queue page with the hypervisor */
		if (is_secure_guest())
			uv_share_page(PHYS_PFN(qpage_phys),
				      1 << xive_alloc_order(order));
	}
fail:
	return rc;
}

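/* Allocate a queue page for a CPU/priority and hand it to the hypervisor */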
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	if (is_secure_guest())
		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* The sPAPR backend matches unconditionally */
	return true;
}

#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */

static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}
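/*
 * Perform an "ack" cycle on the current thread, record the pending
 * priority and update the CPPR of the xive_cpu structure accordingly.
 */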
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * The raw accessor is sufficient here; ordering with the
	 * subsequent queue accesses is enforced by the explicit
	 * barrier below.
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field, which indicates whether
	 * there is a pending interrupt and of which type.
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;

		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}

static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used */;
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug output on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Ask the hypervisor to flush in-flight events for this source */
	plpar_int_sync(0, hw_irq);
}

static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data = xive_spapr_populate_irq_data,
	.configure_irq = xive_spapr_configure_irq,
	.get_irq_config = xive_spapr_get_irq_config,
	.setup_queue = xive_spapr_setup_queue,
	.cleanup_queue = xive_spapr_cleanup_queue,
	.match = xive_spapr_match,
	.shutdown = xive_spapr_shutdown,
	.update_pending = xive_spapr_update_pending,
	.eoi = xive_spapr_eoi,
	.setup_cpu = xive_spapr_setup_cpu,
	.teardown_cpu = xive_spapr_teardown_cpu,
	.sync_source = xive_spapr_sync_source,
	.esb_rw = xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi = xive_spapr_get_ipi,
	.put_ipi = xive_spapr_put_ipi,
#endif /* CONFIG_SMP */
	.name = "spapr",
};

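/* Compute the maximum priority usable by the OS from "ibm,plat-res-int-priorities" */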
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/*
	 * HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. Scan the ranges reserved by
	 * the hypervisor and keep the highest-numbered priority that
	 * is not reserved.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}

static const u8 *get_vec5_feature(unsigned int index)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return NULL;

	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5)
		return NULL;

	if (size <= index)
		return NULL;

	return vec5 + index;
}

static bool xive_spapr_disabled(void)
{
	const u8 *vec5_xive;

	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
	if (vec5_xive) {
		u8 val;

		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
		switch (val) {
		case OV5_FEAT(OV5_XIVE_EITHER):
		case OV5_FEAT(OV5_XIVE_LEGACY):
			break;
		case OV5_FEAT(OV5_XIVE_EXPLOIT):
			/* Hypervisor only supports XIVE exploitation mode */
			if (xive_cmdline_disabled)
				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
			return false;
		default:
			pr_warn("%s: Unknown xive support option: 0x%x\n",
				__func__, val);
			break;
		}
	}

	return xive_cmdline_disabled;
}

bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_spapr_disabled())
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring of the thread interrupt management area */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick the one matching PAGE_SHIFT */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize the XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}