linux/arch/arm/mm/init.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
        cr_alignment = cr_alignment & ~mask;
        return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
        pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        *max_low = PFN_DOWN(memblock_get_current_limit());
        *min = PFN_UP(memblock_start_of_DRAM());
        *max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                arm_dma_zone_size = mdesc->dma_zone_size;
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
        arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
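
/*
 * Illustrative only (not part of this file): a platform that needs a bounded
 * ZONE_DMA would set .dma_zone_size in its (hypothetical) machine descriptor,
 * e.g.
 *
 *	MACHINE_START(EXAMPLE, "Example board")
 *		.dma_zone_size	= SZ_64M,
 *		...
 *	MACHINE_END
 *
 * setup_dma_zone() above then yields arm_dma_limit = PHYS_OFFSET + SZ_64M - 1
 * and arm_dma_pfn_limit accordingly.
 */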

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size)
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}
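
/*
 * Worked example with illustrative numbers: if lowmem spans PFNs
 * [0x60000, 0x80000) but memblock only provides [0x60000, 0x70000) and
 * [0x78000, 0x80000), then zone_size[0] = 0x20000 while the loop above
 * subtracts 0x10000 + 0x8000, leaving zhole_size[0] = 0x8000, which is
 * exactly the number of pages missing from the banks.
 */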

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        phys_addr_t addr = __pfn_to_phys(pfn);

        if (__phys_to_pfn(addr) != pfn)
                return 0;

        return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
        phys_addr_t phys;

        BUG_ON(!arm_memblock_steal_permitted);

        phys = memblock_phys_alloc(size, align);
        if (!phys)
                panic("Failed to steal %pa bytes at %pS\n",
                      &size, (void *)_RET_IP_);

        memblock_free(phys, size);
        memblock_remove(phys, size);

        return phys;
}
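
/*
 * Illustrative only: arm_memblock_steal() is intended for a machine_desc
 * ->reserve() hook that must remove memory from the kernel's view entirely,
 * e.g. RAM owned by secure firmware. A hypothetical hook might look like:
 *
 *	static void __init example_reserve(void)
 *	{
 *		example_fw_base = arm_memblock_steal(SZ_1M, SZ_1M);
 *	}
 *
 * Stealing is refused (BUG) once arm_memblock_init() has cleared
 * arm_memblock_steal_permitted below.
 */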

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        phys_addr_t start;
        unsigned long size;

        initrd_start = initrd_end = 0;

        if (!phys_initrd_size)
                return;

        /*
         * Round the memory region to page boundaries as per free_initrd_mem().
         * This allows us to detect whether the pages overlapping the initrd
         * are in use, but more importantly, reserves the entire set of pages
         * as we don't want these pages allocated for other purposes; a worked
         * example follows this function.
         */
        start = round_down(phys_initrd_start, PAGE_SIZE);
        size = phys_initrd_size + (phys_initrd_start - start);
        size = round_up(size, PAGE_SIZE);

        if (!memblock_is_region_memory(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        if (memblock_is_region_reserved(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        memblock_reserve(start, size);

        /* Now convert initrd to virtual addresses */
        initrd_start = __phys_to_virt(phys_initrd_start);
        initrd_end = initrd_start + phys_initrd_size;
#endif
}
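
/*
 * Worked example for the rounding above (illustrative numbers, 4 KiB pages):
 * with phys_initrd_start = 0x80801200 and phys_initrd_size = 0x300000, the
 * reservation becomes start = 0x80801000 and
 * size = round_up(0x300000 + 0x200, PAGE_SIZE) = 0x301000, covering every
 * page the initrd touches.
 */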

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
        u32 size, ctr;

        /* Read the Cache Type Register; IminLine lives in bits [3:0]. */
        asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

        /* Smallest I-cache line size in bytes: 4 << CTR.IminLine. */
        size = 1 << ((ctr & 0xf) + 2);
        if (cpuid != 0 && icache_size != size)
                pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
                        cpuid);
        if (icache_size > size)
                icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
        /* Register the kernel text, kernel data and initrd with memblock. */
        memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

        arm_initrd_init();

        arm_mm_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

        /* reserve memory for DMA contiguous allocations */
        dma_contiguous_reserve(arm_dma_limit);

        arm_memblock_steal_permitted = false;
        memblock_dump_all();
}

void __init bootmem_init(void)
{
        memblock_allow_resize();

        find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

        early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
                      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations
         */
        memblocks_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        phys_addr_t pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_memblock(memory, reg) {
                start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                start = min(start,
                                 ALIGN(prev_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
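
/*
 * Worked example (illustrative numbers, !CONFIG_SPARSEMEM,
 * MAX_ORDER_NR_PAGES = 0x400): for banks covering PFNs [0x60000, 0x68000)
 * and [0x70000, 0x78000), the first iteration sets prev_end = 0x68000 and
 * the second rounds start down to 0x70000, so free_memmap(0x68000, 0x70000)
 * releases the struct page entries for the 0x8000 PFNs that have no RAM.
 */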

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
        for (; pfn < end; pfn++)
                free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                if (memblock_is_nomap(mem))
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                free_area_high(start, res_start);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        free_area_high(start, end);
        }
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
        swiotlb_init(1);
#endif

        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap();
        memblock_free_all();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

        free_highpages();

        mem_init_print_info(NULL);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE                          > MODULES_VADDR);
        BUG_ON(TASK_SIZE                                > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE      > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
        const char *name;
        unsigned long start;
        unsigned long end;
        pmdval_t mask;
        pmdval_t prot;
        pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
        /* Make page tables, etc. before _stext RW (set NX). */
        {
                .name   = "pre-text NX",
                .start  = PAGE_OFFSET,
                .end    = (unsigned long)_stext,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make init RW (set NX). */
        {
                .name   = "init NX",
                .start  = (unsigned long)__init_begin,
                .end    = (unsigned long)_sdata,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make rodata NX (set RO in ro_perms below). */
        {
                .name   = "rodata NX",
                .start  = (unsigned long)__start_rodata_section_aligned,
                .end    = (unsigned long)__init_begin,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
};

static struct section_perm ro_perms[] = {
        /* Make kernel code and rodata RX (set RO). */
        {
                .name   = "text/rodata RO",
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
                .mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
                .prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .clear  = PMD_SECT_AP_WRITE,
#endif
        },
};
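
/*
 * Taken together (summary, not code): nx_perms marks everything below _stext,
 * the rodata region and the init region as non-executable, while ro_perms
 * makes [_stext, __init_begin), i.e. kernel text plus rodata, read-only. The
 * .clear value is what set_kernel_text_rw() programs to drop the write
 * protection again on !LPAE.
 */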

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
                                  pmdval_t prot, struct mm_struct *mm)
{
        pmd_t *pmd;

        pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
        pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
        if (addr & SECTION_SIZE)
                pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
        else
                pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
        flush_pmd_entry(pmd);
        local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
        if (cpu_architecture() < CPU_ARCH_ARMv6)
                return false;

        return !!(get_cr() & CR_XP);
}

void set_section_perms(struct section_perm *perms, int n, bool set,
                        struct mm_struct *mm)
{
        size_t i;
        unsigned long addr;

        if (!arch_has_strict_perms())
                return;

        for (i = 0; i < n; i++) {
                if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
                    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
                        pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
                                perms[i].name, perms[i].start, perms[i].end,
                                SECTION_SIZE);
                        continue;
                }

                for (addr = perms[i].start;
                     addr < perms[i].end;
                     addr += SECTION_SIZE)
                        section_update(addr, perms[i].mask,
                                set ? perms[i].prot : perms[i].clear, mm);
        }
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework and executed by only one CPU while all other
 * CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
        struct task_struct *t, *s;

        for_each_process(t) {
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
                        if (s->mm)
                                set_section_perms(perms, n, true, s->mm);
        }
        set_section_perms(perms, n, true, current->active_mm);
        set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
        update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
        return 0;
}

static void fix_kernmem_perms(void)
{
        stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
        update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
        return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
        kernel_set_to_readonly = 1;
        stop_machine(__mark_rodata_ro, NULL, NULL);
        debug_checkwx();
}

void set_kernel_text_rw(void)
{
        if (!kernel_set_to_readonly)
                return;

        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
                                current->active_mm);
}

void set_kernel_text_ro(void)
{
        if (!kernel_set_to_readonly)
                return;

        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                                current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
        fix_kernmem_perms();

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start == initrd_start)
                start = round_down(start, PAGE_SIZE);
        if (end == initrd_end)
                end = round_up(end, PAGE_SIZE);

        poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif