/*
 * Common boot and setup code for 64-bit PowerPC.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>

#include "setup.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);
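
/*
 * Note: the 64-byte (0x40) block size above is only a conservative
 * boot-time default; initialize_cache_info() overwrites these fields
 * from the device tree (or from hardcoded POWER8 values). Cache flush
 * helpers step through memory by this L1d block size, so a too-small
 * default is safe, merely slower.
 */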

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for the ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);
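
/*
 * Example: booting with "smt-enabled=2" brings up at most two threads
 * per core; numeric values above threads_per_core are clamped by the
 * min() in check_smt_enabled(), while "on" and "off" select all
 * threads or one thread per core respectively.
 */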

#endif /* CONFIG_SMP */

/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian, as our calling
		 * convention makes us always enter in BE, so there is
		 * nothing to switch in that case.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL call */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via dt-cpu-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-cpu-ftrs but we need to turn it on for
	 * the no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
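
/*
 * Note on AIL: LPCR[AIL] = 0b11 ("Alternate Interrupt Location", ISA
 * v2.07) causes most interrupts to be taken with relocation on, at
 * handlers mapped at 0xc000000000004000, so exception entry runs with
 * the MMU already enabled.
 */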

unsigned long spr_default_dscr;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}
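
/*
 * DSCR is the Data Stream Control Register, controlling hardware
 * prefetch. The firmware-provided value captured here becomes the
 * kernel's default DSCR for threads that have not set their own.
 */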

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * infrastructure, which allows us to relocate the kernel if
 * necessary, and then make a first pass at parsing the device-tree,
 * retrieving the cpu features, type & possibly HW address of the
 * boot cpu.
 *
 * We then finish setting up the boot CPU's paca, configure the
 * exception vectors, apply feature fixups and perform early MMU
 * initialization, so that by the time we return the MMU can be
 * enabled and the rest of setup can run with translation on.
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up
	 * trampolines if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use them as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists.
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE,
	 * it will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}
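
/*
 * A CPU parked by panic_smp_self_stop() spins forever with interrupts
 * hard-disabled; spin_begin() drops the SMT thread priority (HMT low)
 * so the parked thread takes as few pipeline resources as possible
 * away from the sibling thread that is producing the panic output.
 */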

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/*
	 * All secondary cpus are spinning on a common spinloop, release
	 * them all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */
	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
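
/*
 * Note on the address arithmetic above: __secondary_hold_spinloop
 * evaluates to its link-time (virtual) address, so subtracting
 * PHYSICAL_START yields the address the secondaries are really
 * spinning at when the kernel runs relocated (e.g. a kdump kernel).
 * The mb() in the wait loop orders the spinloop store before each
 * re-read of spinning_secondaries.
 */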

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by
 * cache flush routines and/or provided to userland.
 */
static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}
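
/*
 * Worked example: for the hardcoded POWER8 L1i below (size 0x8000,
 * lsize 128, sets 32), assoc = 0x8000 / (32 * 128) = 8-way. A sets
 * value of 0 is the internal marker for fully associative, reported
 * as the 0xffff associativity.
 */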

static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these.
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize  blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128, 128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128, 0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128, 0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * each inside the first cache node.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers and so must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on the linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
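
/*
 * For reference: with the hash MMU, SID_SHIFT is 28, so the first
 * 256MB segment is bolted; with 1T segments (SID_SHIFT_1T == 40) the
 * first 1TB is. Radix never faults on the linear mapping, hence
 * ULONG_MAX.
 */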

static void *__init alloc_stack(unsigned long limit, int cpu)
{
	unsigned long pa;

	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
					early_cpu_to_node(cpu), MEMBLOCK_NONE);
	if (!pa) {
		pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		if (!pa)
			panic("cannot allocate stacks");
	}

	return __va(pa);
}
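
/*
 * alloc_stack() prefers node-local memory and falls back to any node
 * below 'limit'. THREAD_SIZE alignment matters here: the stack
 * pointer is masked with ~(THREAD_SIZE - 1) to locate the
 * thread_info at the base of the stack.
 */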

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}

#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		/* Critical-level exception stack */
		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		/* Debug-level exception stack */
		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		/* Machine-check exception stack */
		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif /* CONFIG_PPC_BOOK3E */

/*
 * Emergency stacks are used for a range of things, from asynchronous
 * NMIs (system reset, machine check) to synchronous, process context.
 * We set preempt_count to zero, even though that isn't necessarily
 * correct. To get the right value we'd need to copy it from the
 * previous thread_info, but doing that might fault causing more
 * problems.
 * TODO: what to do with accounting?
 */
static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
{
	ti->task = NULL;
	ti->cpu = cpu;
	ti->preempt_count = 0;
	ti->local_flags = 0;
	ti->flags = 0;
	klp_init_thread_info(ti);
}

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the
	 * RMO region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;

		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}
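
/*
 * The stacks above grow downwards, so each saved pointer is the *top*
 * of its THREAD_SIZE region, i.e. (void *)ti + THREAD_SIZE; exception
 * entry loads it from the paca and descends from there.
 */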

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}
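
/*
 * LOCAL_DISTANCE and REMOTE_DISTANCE are the generic NUMA distances
 * from <linux/topology.h> (10 and 20); the percpu allocator uses them
 * only to decide which CPUs' units to group into the same chunk.
 */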

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
	 * to group units.  For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
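
/*
 * per_cpu(var, cpu) resolves to &var + __per_cpu_offset[cpu]: delta
 * rebases the linker's .data..percpu template onto the first percpu
 * chunk, and pcpu_unit_offsets[] locates each CPU's unit within it.
 * The offset is mirrored into paca->data_offset for assembly code.
 */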
#endif /* CONFIG_SMP */

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif
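
/*
 * hw_nmi_get_sample_period() above: ppc_proc_freq is the processor
 * frequency in Hz, so frequency times the threshold in seconds gives
 * the perf sample period in cycles.
 */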

/*
 * The perf based hardlockup detector breaks PMU event based branches,
 * so disable it by default. Book3S has a soft-NMI hardlockup detector
 * based on the decrementer interrupt, so it does not suffer from this
 * problem.
 *
 * It is likely to get false positives in VM guests, so disable it
 * there by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	/* The perf based detector is always disabled on powerpc. */
	hardlockup_detector_disable();
#else
	/* Running as an LPAR/guest: false positives, so disable too. */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);

#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.\n");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says
 * to use nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel
	 * is sufficient, the RFI exit handlers will do the right thing.
	 */
}

void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}
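
/*
 * The on_each_cpu(do_nothing, ...) above is not a no-op: the IPI
 * forces every CPU through a kernel entry/exit, so the freshly
 * patched flush sequence takes effect on all CPUs before
 * rfi_flush_enable() returns.
 */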

static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around
	 * to 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2,
							   l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush && !cpu_mitigations_off())
		rfi_flush_enable(enable);
}
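
/*
 * The debugfs file created below allows toggling the flush at
 * runtime, e.g.:
 *   echo 0 > /sys/kernel/debug/powerpc/rfi_flush
 *   echo 1 > /sys/kernel/debug/powerpc/rfi_flush
 */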

#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL,
			    &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */