linux/fs/proc/vmcore.c
/*
 *      fs/proc/vmcore.c Interface for accessing the crash
 *                               dump from the system's previous life.
 *      Heavily borrowed from fs/proc/kcore.c
 *      Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *      Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);
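
/*
 * For reference, each node on vmcore_list is assumed to follow the
 * struct vmcore layout from <linux/kcore.h> (shown here as a sketch,
 * not redefined by this file):
 *
 *      struct vmcore {
 *              struct list_head list;
 *              unsigned long long paddr;  // start of chunk in old memory
 *              unsigned long long size;   // chunk size, page aligned
 *              loff_t offset;             // position in /proc/vmcore
 *      };
 */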

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
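
/*
 * A minimal sketch of how a hypervisor backend might use the hook above
 * (the Xen balloon driver registers a callback of this shape; the names
 * below are hypothetical and only illustrate the contract: > 0 RAM,
 * 0 not RAM, < 0 error):
 *
 *      static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *      {
 *              return example_pfn_is_backed_by_ram(pfn) ? 1 : 0;
 *      }
 *
 *      static int __init example_init(void)
 *      {
 *              return register_oldmem_pfn_is_ram(&example_oldmem_pfn_is_ram);
 *      }
 */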

static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}

/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}
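
/*
 * Worked example for the loop above (assuming 4 KiB pages): a read of
 * 0x2000 bytes at *ppos = 0x1234 starts at pfn = 1, offset = 0x234; the
 * first iteration copies PAGE_SIZE - 0x234 bytes, and each following
 * iteration copies whole pages from offset 0 until count is exhausted.
 */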

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                  unsigned long from, unsigned long pfn,
                                  unsigned long size, pgprot_t prot)
{
        return remap_pfn_range(vma, from, pfn, size, prot);
}
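
/*
 * These __weak hooks exist so an architecture can redirect header and
 * note reads at a 2nd-kernel copy it created itself (s390 overrides them
 * in arch/s390/kernel/crash_dump.c). A minimal sketch of such an
 * override, using a hypothetical arch helper:
 *
 *      ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 *      {
 *              // hypothetical: headers live in 2nd-kernel memory here
 *              memcpy(buf, arch_elfcorehdr_va() + *ppos, count);
 *              *ppos += count;
 *              return count;
 *      }
 */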

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *) target, src, size))
                        return -EFAULT;
        } else {
                memcpy(target, src, size);
        }
        return 0;
}

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise, the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                             int userbuf)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
                if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read Elf note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
                if (copy_to(buffer, kaddr, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - *fpos,
                                            buflen);
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}
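
/*
 * The file layout served by __read_vmcore(), for reference (each region
 * is page aligned by the setup code below):
 *
 *      offset 0:                      merged ELF headers (elfcorebuf)
 *      offset elfcorebuf_sz:          merged note segment (elfnotes_buf)
 *      offset elfcorebuf_sz +
 *             elfnotes_sz:            memory chunks on vmcore_list, in
 *                                     order, each starting at m->offset
 */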

static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#ifdef CONFIG_S390
        struct address_space *mapping = vma->vm_file->f_mapping;
        pgoff_t index = vmf->pgoff;
        struct page *page;
        loff_t offset;
        char *buf;
        int rc;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
                offset = (loff_t) index << PAGE_SHIFT;
                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
                if (rc < 0) {
                        unlock_page(page);
                        put_page(page);
                        return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                }
                SetPageUptodate(page);
        }
        unlock_page(page);
        vmf->page = page;
        return 0;
#else
        return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
        .fault = mmap_vmcore_fault,
};

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
        return vmalloc_user(notes_sz);
#else
        return vzalloc(notes_sz);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
                                    unsigned long from, unsigned long pfn,
                                    unsigned long size, pgprot_t prot)
{
        unsigned long map_size;
        unsigned long pos_start, pos_end, pos;
        unsigned long zeropage_pfn = my_zero_pfn(0);
        size_t len = 0;

        pos_start = pfn;
        pos_end = pfn + (size >> PAGE_SHIFT);

        for (pos = pos_start; pos < pos_end; ++pos) {
                if (!pfn_is_ram(pos)) {
                        /*
                         * We hit a page which is not ram. Remap the continuous
                         * region between pos_start and pos-1 and replace
                         * the non-ram page at pos with the zero page.
                         */
                        if (pos > pos_start) {
                                /* Remap continuous region */
                                map_size = (pos - pos_start) << PAGE_SHIFT;
                                if (remap_oldmem_pfn_range(vma, from + len,
                                                           pos_start, map_size,
                                                           prot))
                                        goto fail;
                                len += map_size;
                        }
                        /* Remap the zero page */
                        if (remap_oldmem_pfn_range(vma, from + len,
                                                   zeropage_pfn,
                                                   PAGE_SIZE, prot))
                                goto fail;
                        len += PAGE_SIZE;
                        pos_start = pos + 1;
                }
        }
        if (pos > pos_start) {
                /* Remap the rest */
                map_size = (pos - pos_start) << PAGE_SHIFT;
                if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                           map_size, prot))
                        goto fail;
        }
        return 0;
fail:
        do_munmap(vma->vm_mm, from, len);
        return -EAGAIN;
}
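
/*
 * Worked example for the loop above: with pfns [R, R, N, R] (R = RAM,
 * N = not RAM), the first two pfns are remapped as one contiguous
 * region, the zero page is mapped in place of the third, and the
 * trailing "Remap the rest" branch maps the final RAM pfn.
 */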

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                            unsigned long from, unsigned long pfn,
                            unsigned long size, pgprot_t prot)
{
        /*
         * Check if oldmem_pfn_is_ram was registered to avoid
         * looping over all pages without a reason.
         */
        if (oldmem_pfn_is_ram)
                return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &vmcore_mmap_ops;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, tsz))
                        goto fail;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
                                                    paddr >> PAGE_SHIFT, tsz,
                                                    vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len);
        return -EAGAIN;
}
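
/*
 * Typical consumer, for reference (a minimal user-space sketch; dump
 * tools such as makedumpfile use the same interface):
 *
 *      int fd = open("/proc/vmcore", O_RDONLY);
 *      void *p = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
 *      // parse the ELF headers at p, then read PT_LOAD data in place
 */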
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
        .mmap           = mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                                  struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc=0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf64_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0) {
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
                }
        }

        return 0;
}
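
/*
 * The size computation above follows the on-disk ELF note layout, where
 * the name and descriptor are each padded to 4-byte boundaries:
 *
 *      Elf64_Nhdr | name (n_namesz, padded to 4) | desc (n_descsz, padded to 4)
 *
 * e.g. a "CORE" NT_PRSTATUS note has n_namesz = 5, padded to 8 bytes,
 * so sz = sizeof(Elf64_Nhdr) + 8 + roundup(n_descsz, 4).
 */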

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz member.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc=0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = alloc_elfnotes_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header*/
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}
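
/*
 * Before/after sketch of the header table rewrite above, for a dump
 * with two per-CPU PT_NOTE entries and one PT_LOAD entry:
 *
 *      before: Ehdr | NOTE(cpu0) | NOTE(cpu1) | LOAD
 *      after:  Ehdr | NOTE(merged, p_memsz = sum) | LOAD   (e_phnum = 2)
 */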

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc=0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf32_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0) {
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
                }
        }

        return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz member.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc=0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = alloc_elfnotes_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header*/
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
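
/*
 * Note on the loop above: in the header the crash kernel was handed,
 * p_offset of each PT_LOAD entry carries the physical start address of
 * the region; it is rewritten here to the chunk's position in the
 * /proc/vmcore file. E.g. with 4 KiB pages, elfsz + elfnotes_sz = 0x3000
 * and p_offset (paddr) = 0x1000200, the chunk starts at paddr 0x1000000
 * and the entry's new p_offset becomes 0x3200.
 */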

static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                           struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
        int rc=0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf32_headers(void)
{
        int rc=0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !elf_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc=0;

        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Allow architectures to allocate ELF header in 2nd kernel */
        rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
        if (rc)
                return rc;
        /*
         * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
         * then capture the dump.
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
fs_initcall(vmcore_init);
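
/*
 * With the file registered, the dump is captured from user space in the
 * crash kernel, e.g. (illustrative shell commands, not part of this file):
 *
 *      cp /proc/vmcore /var/crash/vmcore
 *      makedumpfile -c -d 31 /proc/vmcore /var/crash/vmcore
 */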

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        struct list_head *pos, *next;

        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        list_for_each_safe(pos, next, &vmcore_list) {
                struct vmcore *m;

                m = list_entry(pos, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();
}