linux/arch/sh/kernel/setup.c
/*
 * arch/sh/kernel/setup.c
 *
 * This file handles the architecture-dependent parts of initialization
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007 Paul Mundt
 */
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/pfn.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/crash_dump.h>
#include <linux/mmzone.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/lmb.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
#include <asm/mmu_context.h>

/*
 * Initialize loops_per_jiffy to 10000000 (1000 MIPS).
 * This value is used at the very early stages of serial setup,
 * before the delay loop has been calibrated; erring on the large
 * side is harmless there.
 */
struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
        [0] = {
                .type                   = CPU_SH_NONE,
                .family                 = CPU_FAMILY_UNKNOWN,
                .loops_per_jiffy        = 10000000,
        },
};
EXPORT_SYMBOL(cpu_data);

/*
 * The machine vector. First entry in .machvec.init, or clobbered by
 * sh_mv= on the command line, prior to .machvec.init teardown.
 */
struct sh_machine_vector sh_mv = { .mv_name = "generic", };
EXPORT_SYMBOL(sh_mv);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

extern int root_mountflags;

#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000

static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };

static struct resource code_resource = {
        .name = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource bss_resource = {
        .name   = "Kernel bss",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);

static struct resource mem_resources[MAX_NUMNODES];

int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;

static int __init early_parse_mem(char *p)
{
        unsigned long size;

        memory_start = (unsigned long)__va(__MEMORY_START);
        size = memparse(p, &p);

        if (size > __MEMORY_SIZE) {
                printk(KERN_ERR
                        "Using mem= to increase the size of kernel memory "
                        "is not allowed.\n"
                        "  Recompile the kernel with the correct value for "
                        "CONFIG_MEMORY_SIZE.\n");
                return 0;
        }

        memory_end = memory_start + size;

        return 0;
}
early_param("mem", early_parse_mem);
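
/*
 * Note that "mem=" can only shrink the managed region: booting a
 * kernel built with a 64MiB CONFIG_MEMORY_SIZE with "mem=32M" clamps
 * memory_end to memory_start + 32MiB, while "mem=128M" trips the
 * error above and the configured size is kept.
 */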

/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
static void __init register_bootmem_low_pages(void)
{
        unsigned long curr_pfn, last_pfn, pages;

        /*
         * We are rounding up the start address of usable memory:
         */
        curr_pfn = PFN_UP(__MEMORY_START);

        /*
         * ... and at the end of the usable range downwards:
         */
        last_pfn = PFN_DOWN(__pa(memory_end));

        if (last_pfn > max_low_pfn)
                last_pfn = max_low_pfn;

        pages = last_pfn - curr_pfn;
        free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}
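
/*
 * PFN_UP(x) is (x + PAGE_SIZE - 1) >> PAGE_SHIFT and PFN_DOWN(x) is
 * x >> PAGE_SHIFT, so a partially used page at either end of the
 * range is excluded; with 4KiB pages, PFN_UP(0x1001) == 2 while
 * PFN_DOWN(0x1fff) == 1.
 */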

#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long free_mem;
        unsigned long long crash_size, crash_base;
        void *vp;
        int ret;

        free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, free_mem,
                        &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base <= 0) {
                        vp = alloc_bootmem_nopanic(crash_size);
                        if (!vp) {
                                printk(KERN_INFO "crashkernel allocation "
                                       "failed\n");
                                return;
                        }
                        crash_base = __pa(vp);
                } else if (reserve_bootmem(crash_base, crash_size,
                                        BOOTMEM_EXCLUSIVE) < 0) {
                        printk(KERN_INFO "crashkernel reservation failed - "
                                        "memory is in use\n");
                        return;
                }

                printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                "for crashkernel (System RAM: %ldMB)\n",
                                (unsigned long)(crash_size >> 20),
                                (unsigned long)(crash_base >> 20),
                                (unsigned long)(free_mem >> 20));
                crashk_res.start = crash_base;
                crashk_res.end   = crash_base + crash_size - 1;
                insert_resource(&iomem_resource, &crashk_res);
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
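
/*
 * parse_crashkernel() accepts the usual "crashkernel=size[@offset]"
 * syntax: for example "crashkernel=16M@32M" requests a 16MiB
 * reservation at physical offset 32MiB, while a bare "crashkernel=16M"
 * leaves crash_base at zero and takes the alloc_bootmem_nopanic()
 * path above, letting bootmem pick the location.
 */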

void __cpuinit calibrate_delay(void)
{
        struct clk *clk = clk_get(NULL, "cpu_clk");

        if (IS_ERR(clk))
                panic("Need a sane CPU clock definition!");

        loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;

        printk(KERN_INFO "Calibrating delay loop (skipped)... "
                         "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
                         loops_per_jiffy/(500000/HZ),
                         (loops_per_jiffy/(5000/HZ)) % 100,
                         loops_per_jiffy);
}
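
/*
 * The preset assumes two CPU cycles per delay-loop iteration, hence
 * lpj = (rate / 2) / HZ. For example, a 200MHz cpu_clk with HZ=100
 * gives lpj = 1000000, and the printed figure lpj / (500000 / HZ)
 * works out to 200.00 BogoMIPS - i.e. the clock rate in MHz.
 */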

void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
                                                unsigned long end_pfn)
{
        struct resource *res = &mem_resources[nid];

        WARN_ON(res->name); /* max one active range per node for now */

        res->name = "System RAM";
        res->start = start_pfn << PAGE_SHIFT;
        res->end = (end_pfn << PAGE_SHIFT) - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res)) {
                pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
                       start_pfn, end_pfn);
                return;
        }

        /*
         * We don't know which RAM region contains kernel data,
         * so we try it repeatedly and let the resource manager
         * test it.
         */
        request_resource(res, &code_resource);
        request_resource(res, &data_resource);
        request_resource(res, &bss_resource);

        add_active_range(nid, start_pfn, end_pfn);
}
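
/*
 * After this runs, /proc/iomem shows the kernel sections nested
 * inside the node's RAM, along the lines of (addresses illustrative):
 *
 *      0c000000-0fffffff : System RAM
 *        0c002000-0c1fffff : Kernel code
 *        0c200000-0c2fffff : Kernel data
 *
 * For the sections that fall outside a given range, the nested
 * request_resource() calls simply fail and are ignored.
 */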

void __init setup_bootmem_allocator(unsigned long free_pfn)
{
        unsigned long bootmap_size;
        unsigned long bootmap_pages, bootmem_paddr;
        u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
        int i;

        bootmap_pages = bootmem_bootmap_pages(total_pages);

        bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        /*
         * Find a proper area for the bootmem bitmap. After this
         * bootstrap step all allocations (until the page allocator
         * is up) must be done via alloc_bootmem().
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0),
                                         bootmem_paddr >> PAGE_SHIFT,
                                         min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs. */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                __add_active_range(0, start_pfn, end_pfn);
        }

        /*
         * Add all physical memory to the bootmem map and mark each
         * area as present.
         */
        register_bootmem_low_pages();

        /* Reserve the sections we're already using. */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);

        node_set_online(0);

        sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_BLK_DEV_INITRD
        ROOT_DEV = Root_RAM0;

        if (LOADER_TYPE && INITRD_START) {
                unsigned long initrd_start_phys = INITRD_START + __MEMORY_START;

                if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
                        reserve_bootmem(initrd_start_phys, INITRD_SIZE,
                                        BOOTMEM_DEFAULT);
                        initrd_start = (unsigned long)__va(initrd_start_phys);
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        printk("initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               initrd_start_phys + INITRD_SIZE,
                               (unsigned long)PFN_PHYS(max_low_pfn));
                        initrd_start = 0;
                }
        }
#endif

        reserve_crashkernel();
}
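
/*
 * The bootmem bitmap costs one bit per page, so
 * bootmem_bootmap_pages() asks for total_pages / 8 bytes rounded up
 * to whole pages: for example, 64MiB of DRAM with 4KiB pages is
 * 16384 bits, i.e. a 2KiB bitmap occupying a single page.
 */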

#ifndef CONFIG_NEED_MULTIPLE_NODES
static void __init setup_memory(void)
{
        unsigned long start_pfn;
        u64 base = min_low_pfn << PAGE_SHIFT;
        u64 size = (max_low_pfn << PAGE_SHIFT) - base;

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(_end));

        lmb_add(base, size);

        /*
         * Reserve the kernel text and the bootmem bitmap. We do this
         * in two steps (the first step was init_bootmem()), because
         * this catches the (definitely buggy) case of us accidentally
         * initializing the bootmem allocator with an invalid RAM area.
         */
        lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
                    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

        /*
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
                lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

        lmb_analyze();
        lmb_dump_all();

        setup_bootmem_allocator(start_pfn);
}
#else
extern void __init setup_memory(void);
#endif
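
/*
 * The first lmb_reserve() above covers the kernel image from its
 * load address (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET) up to
 * _end, rounded up to a page boundary; the second keeps the pages
 * below CONFIG_ZERO_PAGE_OFFSET out of circulation so the boot-time
 * zero page region is never handed out as free memory.
 */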

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of the ELF core header stored
 * by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &arg);
        return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
#endif
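
/*
 * In practice this parameter comes from kexec-tools rather than from
 * a human: the capture kernel is loaded with something like
 * "elfcorehdr=0x1e00000" (address illustrative) pointing at the ELF
 * core header that was prepared in the crashkernel region.
 */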

void __init __attribute__ ((weak)) plat_early_device_setup(void)
{
}

void __init setup_arch(char **cmdline_p)
{
        enable_mmu();

        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

        printk(KERN_NOTICE "Boot params:\n"
                           "... MOUNT_ROOT_RDONLY - %08lx\n"
                           "... RAMDISK_FLAGS     - %08lx\n"
                           "... ORIG_ROOT_DEV     - %08lx\n"
                           "... LOADER_TYPE       - %08lx\n"
                           "... INITRD_START      - %08lx\n"
                           "... INITRD_SIZE       - %08lx\n",
                           MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
                           ORIG_ROOT_DEV, LOADER_TYPE,
                           INITRD_START, INITRD_SIZE);

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) _end;

        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(_etext)-1;
        data_resource.start = virt_to_phys(_etext);
        data_resource.end = virt_to_phys(_edata)-1;
        bss_resource.start = virt_to_phys(__bss_start);
        bss_resource.end = virt_to_phys(_ebss)-1;

        memory_start = (unsigned long)__va(__MEMORY_START);
        if (!memory_end)
                memory_end = memory_start + __MEMORY_SIZE;

#ifdef CONFIG_CMDLINE_OVERWRITE
        strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
        strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
#ifdef CONFIG_CMDLINE_EXTEND
        strlcat(command_line, " ", sizeof(command_line));
        strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
#endif
#endif

        /* Save unparsed command line copy for /proc/cmdline */
        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();

        plat_early_device_setup();

        sh_mv_setup();

        /*
         * Find the highest page frame number we have available
         */
        max_pfn = PFN_DOWN(__pa(memory_end));

        /*
         * Determine low and high memory ranges:
         */
        max_low_pfn = max_pfn;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

        nodes_clear(node_online_map);

        /* Setup bootmem with available RAM */
        lmb_init();
        setup_memory();
        sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
#endif

        /* Perform the machine specific initialisation */
        if (likely(sh_mv.mv_setup))
                sh_mv.mv_setup(cmdline_p);

        paging_init();

#ifdef CONFIG_SMP
        plat_smp_setup();
#endif
}
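
/*
 * Ordering matters here: parse_early_param() runs before max_pfn and
 * max_low_pfn are derived from memory_end, so an early "mem=" option
 * can shrink the managed range before lmb_init()/setup_memory()
 * build the memory map, and the board's mv_setup() hook runs only
 * after that map exists but still ahead of paging_init().
 */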

/* processor boot mode configuration */
int generic_mode_pins(void)
{
        pr_warning("generic_mode_pins(): missing mode pin configuration\n");
        return 0;
}

int test_mode_pin(int pin)
{
        return sh_mv.mv_mode_pins() & pin;
}
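
/*
 * Typical use (macro names vary with the platform headers): a board's
 * mv_mode_pins() returns the sampled strap bits and callers test one
 * pin at a time, e.g.
 *
 *      if (test_mode_pin(MODE_PIN0))
 *              configure_for_pin0();
 *
 * where MODE_PIN0 stands for a single-bit mask and
 * configure_for_pin0() is hypothetical.
 */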

static const char *cpu_name[] = {
        [CPU_SH7201]    = "SH7201",
        [CPU_SH7203]    = "SH7203",     [CPU_SH7263]    = "SH7263",
        [CPU_SH7206]    = "SH7206",     [CPU_SH7619]    = "SH7619",
        [CPU_SH7705]    = "SH7705",     [CPU_SH7706]    = "SH7706",
        [CPU_SH7707]    = "SH7707",     [CPU_SH7708]    = "SH7708",
        [CPU_SH7709]    = "SH7709",     [CPU_SH7710]    = "SH7710",
        [CPU_SH7712]    = "SH7712",     [CPU_SH7720]    = "SH7720",
        [CPU_SH7721]    = "SH7721",     [CPU_SH7729]    = "SH7729",
        [CPU_SH7750]    = "SH7750",     [CPU_SH7750S]   = "SH7750S",
        [CPU_SH7750R]   = "SH7750R",    [CPU_SH7751]    = "SH7751",
        [CPU_SH7751R]   = "SH7751R",    [CPU_SH7760]    = "SH7760",
        [CPU_SH4_202]   = "SH4-202",    [CPU_SH4_501]   = "SH4-501",
        [CPU_SH7763]    = "SH7763",     [CPU_SH7770]    = "SH7770",
        [CPU_SH7780]    = "SH7780",     [CPU_SH7781]    = "SH7781",
        [CPU_SH7343]    = "SH7343",     [CPU_SH7785]    = "SH7785",
        [CPU_SH7786]    = "SH7786",     [CPU_SH7757]    = "SH7757",
        [CPU_SH7722]    = "SH7722",     [CPU_SHX3]      = "SH-X3",
        [CPU_SH5_101]   = "SH5-101",    [CPU_SH5_103]   = "SH5-103",
        [CPU_MXG]       = "MX-G",       [CPU_SH7723]    = "SH7723",
        [CPU_SH7366]    = "SH7366",     [CPU_SH7724]    = "SH7724",
        [CPU_SH_NONE]   = "Unknown"
};

const char *get_cpu_subtype(struct sh_cpuinfo *c)
{
        return cpu_name[c->type];
}
EXPORT_SYMBOL(get_cpu_subtype);

#ifdef CONFIG_PROC_FS
/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
static const char *cpu_flags[] = {
        "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
        "ptea", "llsc", "l2", "op32", "pteaex", NULL
};

static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
{
        unsigned long i;

        seq_printf(m, "cpu flags\t:");

        if (!c->flags) {
                seq_printf(m, " %s\n", cpu_flags[0]);
                return;
        }

        for (i = 0; cpu_flags[i]; i++)
                if ((c->flags & (1 << i)))
                        seq_printf(m, " %s", cpu_flags[i+1]);

        seq_printf(m, "\n");
}
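
/*
 * cpu_flags[0] is the "none" placeholder, so flag bit i maps to
 * cpu_flags[i + 1]: CPU_HAS_FPU in bit 0 prints "fpu", and so on.
 * That is why the test uses (1 << i) while the lookup uses i + 1.
 */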

static void show_cacheinfo(struct seq_file *m, const char *type,
                           struct cache_info info)
{
        unsigned int cache_size;

        cache_size = info.ways * info.sets * info.linesz;

        seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
                   type, cache_size >> 10, info.ways);
}
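
/*
 * The size is just ways * sets * linesz: a hypothetical 4-way cache
 * with 512 sets and 32-byte lines reports 4 * 512 * 32 = 65536
 * bytes, printed as "64KiB (4-way)".
 */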

/*
 *      Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct sh_cpuinfo *c = v;
        unsigned int cpu = c - cpu_data;

        if (!cpu_online(cpu))
                return 0;

        if (cpu == 0)
                seq_printf(m, "machine\t\t: %s\n", get_system_type());
        else
                seq_printf(m, "\n");

        seq_printf(m, "processor\t: %d\n", cpu);
        seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
        seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
        if (c->cut_major == -1)
                seq_printf(m, "cut\t\t: unknown\n");
        else if (c->cut_minor == -1)
                seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
        else
                seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);

        show_cpuflags(m, c);

        seq_printf(m, "cache type\t: ");

        /*
         * Check what type of cache we have: we support both the
         * unified cache of the SH-2 and SH-3 and the Harvard-style
         * split cache of the SH-4.
         */
        if (c->icache.flags & SH_CACHE_COMBINED) {
                seq_printf(m, "unified\n");
                show_cacheinfo(m, "cache", c->icache);
        } else {
                seq_printf(m, "split (harvard)\n");
                show_cacheinfo(m, "icache", c->icache);
                show_cacheinfo(m, "dcache", c->dcache);
        }

        /* Optional secondary cache */
        if (c->flags & CPU_HAS_L2_CACHE)
                show_cacheinfo(m, "scache", c->scache);

        seq_printf(m, "bogomips\t: %lu.%02lu\n",
                     c->loops_per_jiffy/(500000/HZ),
                     (c->loops_per_jiffy/(5000/HZ)) % 100);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = show_cpuinfo,
};
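
/*
 * The seq_file core drives c_start()/c_next() with *pos running from
 * 0 to NR_CPUS, yielding one sh_cpuinfo per iteration; offline CPUs
 * are filtered in show_cpuinfo() itself, which is why c_start() does
 * no online check of its own.
 */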
#endif /* CONFIG_PROC_FS */

struct dentry *sh_debugfs_root;

static int __init sh_debugfs_init(void)
{
        sh_debugfs_root = debugfs_create_dir("sh", NULL);
        if (!sh_debugfs_root)
                return -ENOMEM;
        if (IS_ERR(sh_debugfs_root))
                return PTR_ERR(sh_debugfs_root);

        return 0;
}
arch_initcall(sh_debugfs_init);
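
/*
 * Other arch/sh code can hang entries off this directory once the
 * initcall has run, e.g. (file name and fops hypothetical):
 *
 *      debugfs_create_file("example", S_IRUSR, sh_debugfs_root,
 *                          NULL, &example_fops);
 *
 * which shows up as <debugfs mount>/sh/example.
 */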