linux/fs/proc/vmcore.c
/*
 *      fs/proc/vmcore.c Interface for accessing the crash
 *                               dump from the system's previous life.
 *      Heavily borrowed from fs/proc/kcore.c
 *      Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *      Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
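
/*
 * A minimal sketch of how a hypervisor backend might use the two hooks
 * above; the backend name and the pfn test are hypothetical, only the
 * register / unregister calls are the real interface:
 *
 *      static int hyp_oldmem_pfn_is_ram(unsigned long pfn)
 *      {
 *              return hyp_pfn_is_backed(pfn) ? 1 : 0;  // hypothetical test
 *      }
 *
 *      // driver init: fails with -EBUSY if a backend is already registered
 *      rc = register_oldmem_pfn_is_ram(&hyp_oldmem_pfn_is_ram);
 *      ...
 *      // driver exit:
 *      unregister_oldmem_pfn_is_ram();
 */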

static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}

/* Reads a page from the oldmem device at the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}
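
/*
 * Worked example for the loop above (numbers assumed, PAGE_SIZE == 4096):
 * a read of count = 5000 at *ppos = 3000 starts at pfn 0, offset 3000.
 * The first iteration copies PAGE_SIZE - 3000 = 1096 bytes, the second
 * copies the remaining 3904 bytes from pfn 1 at offset 0, and the
 * function returns 5000 with *ppos advanced to 8000.
 */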

/*
 * Architectures may override this function to allocate the ELF header
 * in the 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        return 0;
}

/*
 * Architectures may override this function to free the ELF header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                  unsigned long from, unsigned long pfn,
                                  unsigned long size, pgprot_t prot)
{
        return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Copy to either kernel or user space.
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *) target, src, size))
                        return -EFAULT;
        } else {
                memcpy(target, src, size);
        }
        return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (copy_to(dst, buf, tsz, userbuf)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
                               u64 start, size_t size)
{
        struct vmcoredd_node *dump;
        u64 offset = 0;
        int ret = 0;
        size_t tsz;
        char *buf;

        mutex_lock(&vmcoredd_mutex);
        list_for_each_entry(dump, &vmcoredd_list, list) {
                if (start < offset + dump->size) {
                        tsz = min(offset + (u64)dump->size - start, (u64)size);
                        buf = dump->buf + start - offset;
                        if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
                                ret = -EFAULT;
                                goto out_unlock;
                        }

                        size -= tsz;
                        start += tsz;
                        dst += tsz;

                        /* Leave now if buffer filled already */
                        if (!size)
                                goto out_unlock;
                }
                offset += dump->size;
        }

out_unlock:
        mutex_unlock(&vmcoredd_mutex);
        return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                             int userbuf)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
                if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read Elf note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the device dumps. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data is
                 * avoided.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Read device dumps */
                if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)*fpos, buflen);
                        start = *fpos - elfcorebuf_sz;
                        if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
                                return -EFAULT;

                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (!buflen)
                                return acc;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Read remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
                if (copy_to(buffer, kaddr, tsz, userbuf))
                        return -EFAULT;

                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - *fpos,
                                            buflen);
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}
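
/*
 * Resulting file layout served by __read_vmcore(), as implied by the
 * three stages above (offsets grow downwards):
 *
 *      0                           +------------------------------+
 *                                  | ELF header + program headers |
 *      elfcorebuf_sz               +------------------------------+
 *                                  | device dump notes (optional) |
 *                                  | merged ELF note segment      |
 *      elfcorebuf_sz + elfnotes_sz +------------------------------+
 *                                  | memory chunks on vmcore_list |
 *                                  | (one per PT_LOAD entry)      |
 *      vmcore_size                 +------------------------------+
 */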

static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        pgoff_t index = vmf->pgoff;
        struct page *page;
        loff_t offset;
        char *buf;
        int rc;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
                offset = (loff_t) index << PAGE_SHIFT;
                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
                if (rc < 0) {
                        unlock_page(page);
                        put_page(page);
                        return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                }
                SetPageUptodate(page);
        }
        unlock_page(page);
        vmf->page = page;
        return 0;
#else
        return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
        .fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
        return vmalloc_user(size);
#else
        return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
                                    unsigned long from, unsigned long pfn,
                                    unsigned long size, pgprot_t prot)
{
        unsigned long map_size;
        unsigned long pos_start, pos_end, pos;
        unsigned long zeropage_pfn = my_zero_pfn(0);
        size_t len = 0;

        pos_start = pfn;
        pos_end = pfn + (size >> PAGE_SHIFT);

        for (pos = pos_start; pos < pos_end; ++pos) {
                if (!pfn_is_ram(pos)) {
                        /*
                         * We hit a page which is not ram. Remap the continuous
                         * region between pos_start and pos-1 and replace
                         * the non-ram page at pos with the zero page.
                         */
                        if (pos > pos_start) {
                                /* Remap continuous region */
                                map_size = (pos - pos_start) << PAGE_SHIFT;
                                if (remap_oldmem_pfn_range(vma, from + len,
                                                           pos_start, map_size,
                                                           prot))
                                        goto fail;
                                len += map_size;
                        }
                        /* Remap the zero page */
                        if (remap_oldmem_pfn_range(vma, from + len,
                                                   zeropage_pfn,
                                                   PAGE_SIZE, prot))
                                goto fail;
                        len += PAGE_SIZE;
                        pos_start = pos + 1;
                }
        }
        if (pos > pos_start) {
                /* Remap the rest */
                map_size = (pos - pos_start) << PAGE_SHIFT;
                if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                           map_size, prot))
                        goto fail;
        }
        return 0;
fail:
        do_munmap(vma->vm_mm, from, len, NULL);
        return -EAGAIN;
}
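
/*
 * Worked example for remap_oldmem_pfn_checked() (pfns assumed): mapping
 * pfns 100..104 where only pfn 102 is not RAM results in three calls to
 * remap_oldmem_pfn_range(): one for pfns 100-101, one mapping the zero
 * page in place of 102, and, via the tail handling after the loop, one
 * for pfns 103-104.
 */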

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                            unsigned long from, unsigned long pfn,
                            unsigned long size, pgprot_t prot)
{
        /*
         * Check if oldmem_pfn_is_ram was registered to avoid
         * looping over all pages without a reason.
         */
        if (oldmem_pfn_is_ram)
                return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &vmcore_mmap_ops;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                /* We add device dumps before other elf notes because the
                 * other elf notes may not fill the elf notes buffer
                 * completely and we will end up with zero-filled data
                 * between the elf notes and the device dumps. Tools will
                 * then try to decode this zero-filled data as valid notes
                 * and we don't want that. Hence, adding device dumps before
                 * the other elf notes ensures that zero-filled data is
                 * avoided. This also ensures that the device dumps and
                 * other elf notes can be properly mmaped at page-aligned
                 * addresses.
                 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
                /* Map device dumps */
                if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
                        u64 start_off;

                        tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
                                  (size_t)start, size);
                        start_off = start - elfcorebuf_sz;
                        if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
                                                start_off, tsz))
                                goto fail;

                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        /* leave now if filled buffer already */
                        if (!size)
                                return 0;
                }
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

                /* Map remaining elf notes */
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, tsz))
                        goto fail;

                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
                                                    paddr >> PAGE_SHIFT, tsz,
                                                    vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
        return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
        .read           = read_vmcore,
        .llseek         = default_llseek,
        .mmap           = mmap_vmcore,
};
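
/*
 * A minimal user-space sketch of how these file operations are consumed
 * (illustration only, error handling elided; not part of this file):
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *      #include <sys/stat.h>
 *      #include <unistd.h>
 *      #include <elf.h>
 *
 *      int fd = open("/proc/vmcore", O_RDONLY);
 *      struct stat st;
 *      fstat(fd, &st);
 *
 *      Elf64_Ehdr ehdr;
 *      read(fd, &ehdr, sizeof(ehdr));          // served by read_vmcore()
 *
 *      // served by mmap_vmcore(); fails with ENOSYS on !CONFIG_MMU kernels
 *      void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 */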

static struct vmcore* __init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                           struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc=0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf64_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0) {
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
                }
        }

        return 0;
}
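
/*
 * Worked example of the note-size accounting above (values assumed):
 * a note with n_namesz = 5 and n_descsz = 150 occupies
 *
 *      sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(150, 4)
 *          = 12 + 8 + 152 = 172 bytes,
 *
 * since both the name and the descriptor are padded to 4-byte
 * boundaries. real_sz sums these until the terminating empty note or
 * until max_sz (the p_memsz reported by the crashed kernel) is hit.
 */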

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc=0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf64_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes.  We need this to update the note
         * header when device dumps are added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}
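
/*
 * Sketch of the header-table transformation performed above, for an
 * assumed input with e_phnum = 5 of which 3 are PT_NOTE:
 *
 *      before: Ehdr | NOTE | NOTE | NOTE | LOAD | LOAD
 *      after:  Ehdr | NOTE (merged, p_memsz = phdr_sz) | LOAD | LOAD
 *
 * e_phnum drops to 3, *elfsz shrinks by two Elf64_Phdr entries and is
 * then rounded back up to a page boundary.
 */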

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc=0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf32_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0) {
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
                }
        }

        return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc=0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote=0, rc=0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = vmcore_alloc_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type    = PT_NOTE;
        phdr.p_flags   = 0;
        note_off = sizeof(Elf32_Ehdr) +
                        (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset  = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr   = phdr.p_paddr = 0;
        phdr.p_filesz  = phdr.p_memsz = phdr_sz;
        phdr.p_align   = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        /* Store the size of all notes.  We need this to update the note
         * header when device dumps are added.
         */
        elfnotes_orig_sz = phdr.p_memsz;

        return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
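
/*
 * Worked example of the offset rewrite above (addresses assumed):
 * a PT_LOAD entry whose p_offset (the old physical address) is
 * 0x10000500 with p_memsz 0x2000 is expanded to the page-aligned
 * range [0x10000000, 0x10003000), a 0x3000-byte chunk is queued on
 * vc_list, and p_offset is rewritten to vmcore_off + 0x500 so the
 * data in the file still starts at the original unaligned address.
 */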

static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                size_t elfsz,
                                                size_t elfnotes_sz,
                                                struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list.*/
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                    struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
        int rc=0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf64_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                                ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf32_headers(void)
{
        int rc=0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
                (ehdr.e_type != ET_CORE) ||
                !vmcore_elf32_check_arch(&ehdr) ||
                ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
                ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
                ehdr.e_version != EV_CURRENT ||
                ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
                ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
                ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc=0;

        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
                                  u32 size)
{
        struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

        vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
        vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
        vdd_hdr->n_type = NT_VMCOREDD;

        strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
                sizeof(vdd_hdr->name));
        memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
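
/*
 * The resulting note layout, as implied by vmcoredd_write_header() above;
 * the descriptor deliberately covers both dump_name and the device data,
 * so tools see each device dump as one ELF note:
 *
 *      n_namesz = sizeof(vdd_hdr->name)
 *      n_descsz = sizeof(vdd_hdr->dump_name) + size of device data
 *      n_type   = NT_VMCOREDD
 *      name     = VMCOREDD_NOTE_NAME
 *      desc     = dump_name | device dump data
 */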

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
                                            size_t vmcoreddsz)
{
        unsigned char *e_ident = (unsigned char *)elfptr;
        u64 start, end, size;
        loff_t vmcore_off;
        u32 i;

        vmcore_off = elfcorebuf_sz + elfnotesz;

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
                Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        } else {
                Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
                Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

                /* Update all program headers */
                for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
                        if (phdr->p_type == PT_NOTE) {
                                /* Update note size */
                                phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
                                phdr->p_filesz = phdr->p_memsz;
                                continue;
                        }

                        start = rounddown(phdr->p_offset, PAGE_SIZE);
                        end = roundup(phdr->p_offset + phdr->p_memsz,
                                      PAGE_SIZE);
                        size = end - start;
                        phdr->p_offset = vmcore_off + (phdr->p_offset - start);
                        vmcore_off += size;
                }
        }
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
        vmcoredd_orig_sz += dump_size;
        elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
        vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
                                        vmcoredd_orig_sz);

        /* Update vmcore list offsets */
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);
        proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collection routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
        struct vmcoredd_node *dump;
        void *buf = NULL;
        size_t data_size;
        int ret;

        if (!data || !strlen(data->dump_name) ||
            !data->vmcoredd_callback || !data->size)
                return -EINVAL;

        dump = vzalloc(sizeof(*dump));
        if (!dump) {
                ret = -ENOMEM;
                goto out_err;
        }

        /* Keep size of the buffer page aligned so that it can be mmaped */
        data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
                            PAGE_SIZE);

        /* Allocate buffer for drivers to write their dumps */
        buf = vmcore_alloc_buf(data_size);
        if (!buf) {
                ret = -ENOMEM;
                goto out_err;
        }

        vmcoredd_write_header(buf, data, data_size -
                              sizeof(struct vmcoredd_header));

        /* Invoke the driver's dump collection routine */
        ret = data->vmcoredd_callback(data, buf +
                                      sizeof(struct vmcoredd_header));
        if (ret)
                goto out_err;

        dump->buf = buf;
        dump->size = data_size;

        /* Add the dump to driver sysfs list */
        mutex_lock(&vmcoredd_mutex);
        list_add_tail(&dump->list, &vmcoredd_list);
        mutex_unlock(&vmcoredd_mutex);

        vmcoredd_update_size(data_size);
        return 0;

out_err:
        if (buf)
                vfree(buf);

        if (dump)
                vfree(dump);

        return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
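
/*
 * A minimal sketch of a driver feeding a dump into the list above; the
 * driver name, sizes and collect callback are hypothetical, only struct
 * vmcoredd_data and vmcore_add_device_dump() are the real interface:
 *
 *      static int mydrv_collect(struct vmcoredd_data *data, void *buf)
 *      {
 *              memcpy(buf, mydrv_snapshot, data->size); // hypothetical copy
 *              return 0;
 *      }
 *
 *      static struct vmcoredd_data mydrv_dump = {
 *              .dump_name         = "MYDRV_DUMP",
 *              .size              = MYDRV_SNAPSHOT_SZ,
 *              .vmcoredd_callback = mydrv_collect,
 *      };
 *
 *      // typically called from the driver's panic/crash notifier:
 *      vmcore_add_device_dump(&mydrv_dump);
 */
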
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
        mutex_lock(&vmcoredd_mutex);
        while (!list_empty(&vmcoredd_list)) {
                struct vmcoredd_node *dump;

                dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
                                        list);
                list_del(&dump->list);
                vfree(dump->buf);
                vfree(dump);
        }
        mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Allow architectures to allocate ELF header in 2nd kernel */
        rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
        if (rc)
                return rc;
        /*
         * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
         * then capture the dump.
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        while (!list_empty(&vmcore_list)) {
                struct vmcore *m;

                m = list_first_entry(&vmcore_list, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();

        /* clear vmcore device dump list */
        vmcore_free_device_dumps();
}