linux/fs/pstore/platform.c
   1/*
   2 * Persistent Storage - platform driver interface parts.
   3 *
   4 * Copyright (C) 2007-2008 Google, Inc.
   5 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
   6 *
   7 *  This program is free software; you can redistribute it and/or modify
   8 *  it under the terms of the GNU General Public License version 2 as
   9 *  published by the Free Software Foundation.
  10 *
  11 *  This program is distributed in the hope that it will be useful,
  12 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 *  GNU General Public License for more details.
  15 *
  16 *  You should have received a copy of the GNU General Public License
  17 *  along with this program; if not, write to the Free Software
  18 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  19 */
  20
  21#define pr_fmt(fmt) "pstore: " fmt
  22
  23#include <linux/atomic.h>
  24#include <linux/types.h>
  25#include <linux/errno.h>
  26#include <linux/init.h>
  27#include <linux/kmsg_dump.h>
  28#include <linux/console.h>
  29#include <linux/module.h>
  30#include <linux/pstore.h>
  31#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
  32#include <linux/zlib.h>
  33#endif
  34#ifdef CONFIG_PSTORE_LZO_COMPRESS
  35#include <linux/lzo.h>
  36#endif
  37#ifdef CONFIG_PSTORE_LZ4_COMPRESS
  38#include <linux/lz4.h>
  39#endif
  40#include <linux/string.h>
  41#include <linux/timer.h>
  42#include <linux/slab.h>
  43#include <linux/uaccess.h>
  44#include <linux/jiffies.h>
  45#include <linux/workqueue.h>
  46
  47#include "internal.h"
  48
  49/*
   50 * We defer making "oops" entries appear in pstore until we can
   51 * see whether the system is actually still running well enough
   52 * for someone to read the entry.
  53 */
  54static int pstore_update_ms = -1;
  55module_param_named(update_ms, pstore_update_ms, int, 0600);
  56MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
  57                 "(default is -1, which means runtime updates are disabled; "
   58                 "enabling this option is not safe; it may lead to further "
  59                 "corruption on Oopses)");
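     /*
      * A non-negative value set before the backend registers (e.g. via a
      * pstore.update_ms= boot parameter) arms a periodic timer that polls
      * the backend for new records; see pstore_timefunc() below.
      */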
  60
  61static int pstore_new_entry;
  62
  63static void pstore_timefunc(struct timer_list *);
  64static DEFINE_TIMER(pstore_timer, pstore_timefunc);
  65
  66static void pstore_dowork(struct work_struct *);
  67static DECLARE_WORK(pstore_work, pstore_dowork);
  68
  69/*
  70 * pstore_lock just protects "psinfo" during
  71 * calls to pstore_register()
  72 */
  73static DEFINE_SPINLOCK(pstore_lock);
  74struct pstore_info *psinfo;
  75
  76static char *backend;
  77
  78/* Compression parameters */
  79#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
  80#define COMPR_LEVEL 6
  81#define WINDOW_BITS 12
  82#define MEM_LEVEL 4
  83static struct z_stream_s stream;
  84#else
  85static unsigned char *workspace;
  86#endif
  87
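     /*
      * Operations for one compression algorithm: compress/decompress
      * callbacks plus allocate/free hooks for the scratch buffers they
      * need. At most one backend is built in; see the zbackend
      * initializer further down.
      */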
  88struct pstore_zbackend {
  89        int (*compress)(const void *in, void *out, size_t inlen, size_t outlen);
  90        int (*decompress)(void *in, void *out, size_t inlen, size_t outlen);
  91        void (*allocate)(void);
  92        void (*free)(void);
  93
  94        const char *name;
  95};
  96
  97static char *big_oops_buf;
  98static size_t big_oops_buf_sz;
  99
 100/* How much of the console log to snapshot */
 101unsigned long kmsg_bytes = PSTORE_DEFAULT_KMSG_BYTES;
 102
 103void pstore_set_kmsg_bytes(int bytes)
 104{
 105        kmsg_bytes = bytes;
 106}
 107
 108/* Tag each group of saved records with a sequence number */
 109static int      oopscount;
 110
 111static const char *get_reason_str(enum kmsg_dump_reason reason)
 112{
 113        switch (reason) {
 114        case KMSG_DUMP_PANIC:
 115                return "Panic";
 116        case KMSG_DUMP_OOPS:
 117                return "Oops";
 118        case KMSG_DUMP_EMERG:
 119                return "Emergency";
 120        case KMSG_DUMP_RESTART:
 121                return "Restart";
 122        case KMSG_DUMP_HALT:
 123                return "Halt";
 124        case KMSG_DUMP_POWEROFF:
 125                return "Poweroff";
 126        default:
 127                return "Unknown";
 128        }
 129}
 130
 131bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
 132{
 133        /*
  134         * When called from an NMI, pstore must not block,
  135         * regardless of the reason.
 136         */
 137        if (in_nmi())
 138                return true;
 139
 140        switch (reason) {
 141        /* In panic case, other cpus are stopped by smp_send_stop(). */
 142        case KMSG_DUMP_PANIC:
 143        /* Emergency restart shouldn't be blocked by spin lock. */
 144        case KMSG_DUMP_EMERG:
 145                return true;
 146        default:
 147                return false;
 148        }
 149}
 150EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
 151
 152#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
 153/* Derived from logfs_compress() */
 154static int compress_zlib(const void *in, void *out, size_t inlen, size_t outlen)
 155{
 156        int err, ret;
 157
 158        ret = -EIO;
 159        err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
 160                                                MEM_LEVEL, Z_DEFAULT_STRATEGY);
 161        if (err != Z_OK)
 162                goto error;
 163
 164        stream.next_in = in;
 165        stream.avail_in = inlen;
 166        stream.total_in = 0;
 167        stream.next_out = out;
 168        stream.avail_out = outlen;
 169        stream.total_out = 0;
 170
 171        err = zlib_deflate(&stream, Z_FINISH);
 172        if (err != Z_STREAM_END)
 173                goto error;
 174
 175        err = zlib_deflateEnd(&stream);
 176        if (err != Z_OK)
 177                goto error;
 178
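             /* Treat output that did not shrink as a compression failure. */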
 179        if (stream.total_out >= stream.total_in)
 180                goto error;
 181
 182        ret = stream.total_out;
 183error:
 184        return ret;
 185}
 186
 187/* Derived from logfs_uncompress */
 188static int decompress_zlib(void *in, void *out, size_t inlen, size_t outlen)
 189{
 190        int err, ret;
 191
 192        ret = -EIO;
 193        err = zlib_inflateInit2(&stream, WINDOW_BITS);
 194        if (err != Z_OK)
 195                goto error;
 196
 197        stream.next_in = in;
 198        stream.avail_in = inlen;
 199        stream.total_in = 0;
 200        stream.next_out = out;
 201        stream.avail_out = outlen;
 202        stream.total_out = 0;
 203
 204        err = zlib_inflate(&stream, Z_FINISH);
 205        if (err != Z_STREAM_END)
 206                goto error;
 207
 208        err = zlib_inflateEnd(&stream);
 209        if (err != Z_OK)
 210                goto error;
 211
 212        ret = stream.total_out;
 213error:
 214        return ret;
 215}
 216
 217static void allocate_zlib(void)
 218{
 219        size_t size;
 220        size_t cmpr;
 221
 222        switch (psinfo->bufsize) {
 223        /* buffer range for efivars */
 224        case 1000 ... 2000:
 225                cmpr = 56;
 226                break;
 227        case 2001 ... 3000:
 228                cmpr = 54;
 229                break;
 230        case 3001 ... 3999:
 231                cmpr = 52;
 232                break;
 233        /* buffer range for nvram, erst */
 234        case 4000 ... 10000:
 235                cmpr = 45;
 236                break;
 237        default:
 238                cmpr = 60;
 239                break;
 240        }
 241
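             /*
              * cmpr is the assumed worst-case compressed size as a percentage
              * of the input, so this sizes the uncompressed staging buffer
              * such that its compressed output should fit in psinfo->bufsize.
              */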
 242        big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr;
 243        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
 244        if (big_oops_buf) {
 245                size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
 246                        zlib_inflate_workspacesize());
 247                stream.workspace = kmalloc(size, GFP_KERNEL);
 248                if (!stream.workspace) {
 249                        pr_err("No memory for compression workspace; skipping compression\n");
 250                        kfree(big_oops_buf);
 251                        big_oops_buf = NULL;
 252                }
 253        } else {
 254                pr_err("No memory for uncompressed data; skipping compression\n");
 255                stream.workspace = NULL;
 256        }
 257
 258}
 259
 260static void free_zlib(void)
 261{
 262        kfree(stream.workspace);
 263        stream.workspace = NULL;
 264        kfree(big_oops_buf);
 265        big_oops_buf = NULL;
 266        big_oops_buf_sz = 0;
 267}
 268
 269static const struct pstore_zbackend backend_zlib = {
 270        .compress       = compress_zlib,
 271        .decompress     = decompress_zlib,
 272        .allocate       = allocate_zlib,
 273        .free           = free_zlib,
 274        .name           = "zlib",
 275};
 276#endif
 277
 278#ifdef CONFIG_PSTORE_LZO_COMPRESS
 279static int compress_lzo(const void *in, void *out, size_t inlen, size_t outlen)
 280{
 281        int ret;
 282
 283        ret = lzo1x_1_compress(in, inlen, out, &outlen, workspace);
 284        if (ret != LZO_E_OK) {
 285                pr_err("lzo_compress error, ret = %d!\n", ret);
 286                return -EIO;
 287        }
 288
 289        return outlen;
 290}
 291
 292static int decompress_lzo(void *in, void *out, size_t inlen, size_t outlen)
 293{
 294        int ret;
 295
 296        ret = lzo1x_decompress_safe(in, inlen, out, &outlen);
 297        if (ret != LZO_E_OK) {
 298                pr_err("lzo_decompress error, ret = %d!\n", ret);
 299                return -EIO;
 300        }
 301
 302        return outlen;
 303}
 304
 305static void allocate_lzo(void)
 306{
 307        big_oops_buf_sz = lzo1x_worst_compress(psinfo->bufsize);
 308        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
 309        if (big_oops_buf) {
 310                workspace = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
 311                if (!workspace) {
 312                        pr_err("No memory for compression workspace; skipping compression\n");
 313                        kfree(big_oops_buf);
 314                        big_oops_buf = NULL;
 315                }
 316        } else {
 317                pr_err("No memory for uncompressed data; skipping compression\n");
 318                workspace = NULL;
 319        }
 320}
 321
 322static void free_lzo(void)
 323{
 324        kfree(workspace);
 325        kfree(big_oops_buf);
 326        big_oops_buf = NULL;
 327        big_oops_buf_sz = 0;
 328}
 329
 330static const struct pstore_zbackend backend_lzo = {
 331        .compress       = compress_lzo,
 332        .decompress     = decompress_lzo,
 333        .allocate       = allocate_lzo,
 334        .free           = free_lzo,
 335        .name           = "lzo",
 336};
 337#endif
 338
 339#ifdef CONFIG_PSTORE_LZ4_COMPRESS
 340static int compress_lz4(const void *in, void *out, size_t inlen, size_t outlen)
 341{
 342        int ret;
 343
 344        ret = LZ4_compress_default(in, out, inlen, outlen, workspace);
 345        if (!ret) {
 346                pr_err("LZ4_compress_default error; compression failed!\n");
 347                return -EIO;
 348        }
 349
 350        return ret;
 351}
 352
 353static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen)
 354{
 355        int ret;
 356
 357        ret = LZ4_decompress_safe(in, out, inlen, outlen);
 358        if (ret < 0) {
 359                /*
 360                 * LZ4_decompress_safe will return an error code
 361                 * (< 0) if decompression failed
 362                 */
 363                pr_err("LZ4_decompress_safe error, ret = %d!\n", ret);
 364                return -EIO;
 365        }
 366
 367        return ret;
 368}
 369
 370static void allocate_lz4(void)
 371{
 372        big_oops_buf_sz = LZ4_compressBound(psinfo->bufsize);
 373        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
 374        if (big_oops_buf) {
 375                workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
 376                if (!workspace) {
 377                        pr_err("No memory for compression workspace; skipping compression\n");
 378                        kfree(big_oops_buf);
 379                        big_oops_buf = NULL;
 380                }
 381        } else {
 382                pr_err("No memory for uncompressed data; skipping compression\n");
 383                workspace = NULL;
 384        }
 385}
 386
 387static void free_lz4(void)
 388{
 389        kfree(workspace);
 390        kfree(big_oops_buf);
 391        big_oops_buf = NULL;
 392        big_oops_buf_sz = 0;
 393}
 394
 395static const struct pstore_zbackend backend_lz4 = {
 396        .compress       = compress_lz4,
 397        .decompress     = decompress_lz4,
 398        .allocate       = allocate_lz4,
 399        .free           = free_lz4,
 400        .name           = "lz4",
 401};
 402#endif
 403
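     /*
      * Pick whichever compression algorithm was built in. With no
      * CONFIG_PSTORE_*_COMPRESS option selected, zbackend stays NULL and
      * dmesg dumps are stored uncompressed.
      */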
 404static const struct pstore_zbackend *zbackend =
 405#if defined(CONFIG_PSTORE_ZLIB_COMPRESS)
 406        &backend_zlib;
 407#elif defined(CONFIG_PSTORE_LZO_COMPRESS)
 408        &backend_lzo;
 409#elif defined(CONFIG_PSTORE_LZ4_COMPRESS)
 410        &backend_lz4;
 411#else
 412        NULL;
 413#endif
 414
 415static int pstore_compress(const void *in, void *out,
 416                           size_t inlen, size_t outlen)
 417{
 418        if (zbackend)
 419                return zbackend->compress(in, out, inlen, outlen);
 420        else
 421                return -EIO;
 422}
 423
 424static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
 425{
 426        if (zbackend)
 427                return zbackend->decompress(in, out, inlen, outlen);
 428        else
 429                return -EIO;
 430}
 431
 432static void allocate_buf_for_compression(void)
 433{
 434        if (zbackend) {
 435                pr_info("using %s compression\n", zbackend->name);
 436                zbackend->allocate();
 437        } else {
 438                pr_err("allocate compression buffer error!\n");
 439        }
 440}
 441
 442static void free_buf_for_compression(void)
 443{
 444        if (zbackend)
 445                zbackend->free();
 446        else
 447                pr_err("free compression buffer error!\n");
 448}
 449
 450/*
  451 * Called when compression fails. The printk buffer has already been
  452 * fetched for compression, and fetching it again would advance the
  453 * printk iterator and return older contents. Instead, copy as much
  454 * of the most recent messages as will fit from big_oops_buf into
  455 * psinfo->buf.
 456 */
 457static size_t copy_kmsg_to_buffer(int hsize, size_t len)
 458{
 459        size_t total_len;
 460        size_t diff;
 461
 462        total_len = hsize + len;
 463
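             /*
              * If the uncompressed dump does not fit in the backend buffer,
              * keep the header plus the newest (bufsize - hsize) bytes of
              * message text and drop the oldest lines.
              */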
 464        if (total_len > psinfo->bufsize) {
 465                diff = total_len - psinfo->bufsize + hsize;
 466                memcpy(psinfo->buf, big_oops_buf, hsize);
 467                memcpy(psinfo->buf + hsize, big_oops_buf + diff,
 468                                        psinfo->bufsize - hsize);
 469                total_len = psinfo->bufsize;
 470        } else
 471                memcpy(psinfo->buf, big_oops_buf, total_len);
 472
 473        return total_len;
 474}
 475
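     /**
      * pstore_record_init() - initialize a record for a given backend
      * @record: record to initialize
      * @psinfo: backend that will own the record
      *
      * Zeroes the record, ties it to @psinfo and stamps it with a
      * best-effort wall-clock time; every other field is left for the
      * caller to fill in.
      */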
 476void pstore_record_init(struct pstore_record *record,
 477                        struct pstore_info *psinfo)
 478{
 479        memset(record, 0, sizeof(*record));
 480
 481        record->psi = psinfo;
 482
 483        /* Report zeroed timestamp if called before timekeeping has resumed. */
 484        record->time = ns_to_timespec(ktime_get_real_fast_ns());
 485}
 486
 487/*
  488 * Callback from kmsg_dump. Save as much as we can of the end of the
  489 * kernel log buffer, up to kmsg_bytes, splitting it into numbered
  490 * "Part" records when it does not fit in one backend write.
 491 */
 492static void pstore_dump(struct kmsg_dumper *dumper,
 493                        enum kmsg_dump_reason reason)
 494{
 495        unsigned long   total = 0;
 496        const char      *why;
 497        unsigned int    part = 1;
 498        unsigned long   flags = 0;
 499        int             is_locked;
 500        int             ret;
 501
 502        why = get_reason_str(reason);
 503
 504        if (pstore_cannot_block_path(reason)) {
 505                is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
 506                if (!is_locked) {
  507                        pr_err("pstore dump routine blocked in %s path, may corrupt error record\n",
  508                               in_nmi() ? "NMI" : why);
 509                        return;
 510                }
 511        } else {
 512                spin_lock_irqsave(&psinfo->buf_lock, flags);
 513                is_locked = 1;
 514        }
 515        oopscount++;
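             /*
              * Save the tail of the kernel log as one or more numbered
              * "Part" records until kmsg_bytes have been written or the
              * dumper runs out of data.
              */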
 516        while (total < kmsg_bytes) {
 517                char *dst;
 518                size_t dst_size;
 519                int header_size;
 520                int zipped_len = -1;
 521                size_t dump_size;
 522                struct pstore_record record;
 523
 524                pstore_record_init(&record, psinfo);
 525                record.type = PSTORE_TYPE_DMESG;
 526                record.count = oopscount;
 527                record.reason = reason;
 528                record.part = part;
 529                record.buf = psinfo->buf;
 530
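                     /*
                      * Stage into big_oops_buf when compression is available;
                      * otherwise dump straight into the backend's own buffer.
                      */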
 531                if (big_oops_buf && is_locked) {
 532                        dst = big_oops_buf;
 533                        dst_size = big_oops_buf_sz;
 534                } else {
 535                        dst = psinfo->buf;
 536                        dst_size = psinfo->bufsize;
 537                }
 538
 539                /* Write dump header. */
 540                header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
 541                                 oopscount, part);
 542                dst_size -= header_size;
 543
 544                /* Write dump contents. */
 545                if (!kmsg_dump_get_buffer(dumper, true, dst + header_size,
 546                                          dst_size, &dump_size))
 547                        break;
 548
 549                if (big_oops_buf && is_locked) {
 550                        zipped_len = pstore_compress(dst, psinfo->buf,
 551                                                header_size + dump_size,
 552                                                psinfo->bufsize);
 553
 554                        if (zipped_len > 0) {
 555                                record.compressed = true;
 556                                record.size = zipped_len;
 557                        } else {
 558                                record.size = copy_kmsg_to_buffer(header_size,
 559                                                                  dump_size);
 560                        }
 561                } else {
 562                        record.size = header_size + dump_size;
 563                }
 564
 565                ret = psinfo->write(&record);
 566                if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
 567                        pstore_new_entry = 1;
 568
 569                total += record.size;
 570                part++;
 571        }
 572        if (is_locked)
 573                spin_unlock_irqrestore(&psinfo->buf_lock, flags);
 574}
 575
 576static struct kmsg_dumper pstore_dumper = {
 577        .dump = pstore_dump,
 578};
 579
 580/*
 581 * Register with kmsg_dump to save last part of console log on panic.
 582 */
 583static void pstore_register_kmsg(void)
 584{
 585        kmsg_dump_register(&pstore_dumper);
 586}
 587
 588static void pstore_unregister_kmsg(void)
 589{
 590        kmsg_dump_unregister(&pstore_dumper);
 591}
 592
 593#ifdef CONFIG_PSTORE_CONSOLE
 594static void pstore_console_write(struct console *con, const char *s, unsigned c)
 595{
 596        const char *e = s + c;
 597
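             /*
              * Console output can be larger than one backend record, so
              * write it out in bufsize-sized chunks; if the buffer lock
              * cannot be taken during an oops, drop the rest rather than
              * risk deadlocking.
              */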
 598        while (s < e) {
 599                struct pstore_record record;
 600                unsigned long flags;
 601
 602                pstore_record_init(&record, psinfo);
 603                record.type = PSTORE_TYPE_CONSOLE;
 604
 605                if (c > psinfo->bufsize)
 606                        c = psinfo->bufsize;
 607
 608                if (oops_in_progress) {
 609                        if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
 610                                break;
 611                } else {
 612                        spin_lock_irqsave(&psinfo->buf_lock, flags);
 613                }
 614                record.buf = (char *)s;
 615                record.size = c;
 616                psinfo->write(&record);
 617                spin_unlock_irqrestore(&psinfo->buf_lock, flags);
 618                s += c;
 619                c = e - s;
 620        }
 621}
 622
 623static struct console pstore_console = {
 624        .name   = "pstore",
 625        .write  = pstore_console_write,
 626        .flags  = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
 627        .index  = -1,
 628};
 629
 630static void pstore_register_console(void)
 631{
 632        register_console(&pstore_console);
 633}
 634
 635static void pstore_unregister_console(void)
 636{
 637        unregister_console(&pstore_console);
 638}
 639#else
 640static void pstore_register_console(void) {}
 641static void pstore_unregister_console(void) {}
 642#endif
 643
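     /*
      * Default ->write_user() for backends that only implement ->write():
      * copy the userspace buffer into a kernel allocation and feed it to
      * the regular write path.
      */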
 644static int pstore_write_user_compat(struct pstore_record *record,
 645                                    const char __user *buf)
 646{
 647        int ret = 0;
 648
 649        if (record->buf)
 650                return -EINVAL;
 651
 652        record->buf = memdup_user(buf, record->size);
 653        if (IS_ERR(record->buf)) {
 654                ret = PTR_ERR(record->buf);
 655                goto out;
 656        }
 657
 658        ret = record->psi->write(record);
 659
 660        kfree(record->buf);
 661out:
 662        record->buf = NULL;
 663
 664        return unlikely(ret < 0) ? ret : record->size;
 665}
 666
 667/*
 668 * platform specific persistent storage driver registers with
 669 * us here. If pstore is already mounted, call the platform
 670 * read function right away to populate the file system. If not
 671 * then the pstore mount code will call us later to fill out
 672 * the file system.
 673 */
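     /*
      * For illustration only: a minimal backend might register roughly
      * like this (the "example_*" names are hypothetical, not part of
      * the kernel):
      *
      *     static struct pstore_info example_psi = {
      *             .owner   = THIS_MODULE,
      *             .name    = "example",
      *             .flags   = PSTORE_FLAGS_DMESG,
      *             .buf     = example_buf,
      *             .bufsize = sizeof(example_buf),
      *             .read    = example_read,
      *             .write   = example_write,
      *     };
      *
      *     ret = pstore_register(&example_psi);
      */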
 674int pstore_register(struct pstore_info *psi)
 675{
 676        struct module *owner = psi->owner;
 677
 678        if (backend && strcmp(backend, psi->name)) {
 679                pr_warn("ignoring unexpected backend '%s'\n", psi->name);
 680                return -EPERM;
 681        }
 682
 683        /* Sanity check flags. */
 684        if (!psi->flags) {
 685                pr_warn("backend '%s' must support at least one frontend\n",
 686                        psi->name);
 687                return -EINVAL;
 688        }
 689
 690        /* Check for required functions. */
 691        if (!psi->read || !psi->write) {
 692                pr_warn("backend '%s' must implement read() and write()\n",
 693                        psi->name);
 694                return -EINVAL;
 695        }
 696
 697        spin_lock(&pstore_lock);
 698        if (psinfo) {
 699                pr_warn("backend '%s' already loaded: ignoring '%s'\n",
 700                        psinfo->name, psi->name);
 701                spin_unlock(&pstore_lock);
 702                return -EBUSY;
 703        }
 704
 705        if (!psi->write_user)
 706                psi->write_user = pstore_write_user_compat;
 707        psinfo = psi;
 708        mutex_init(&psinfo->read_mutex);
 709        spin_unlock(&pstore_lock);
 710
 711        if (owner && !try_module_get(owner)) {
 712                psinfo = NULL;
 713                return -EINVAL;
 714        }
 715
 716        allocate_buf_for_compression();
 717
 718        if (pstore_is_mounted())
 719                pstore_get_records(0);
 720
 721        if (psi->flags & PSTORE_FLAGS_DMESG)
 722                pstore_register_kmsg();
 723        if (psi->flags & PSTORE_FLAGS_CONSOLE)
 724                pstore_register_console();
 725        if (psi->flags & PSTORE_FLAGS_FTRACE)
 726                pstore_register_ftrace();
 727        if (psi->flags & PSTORE_FLAGS_PMSG)
 728                pstore_register_pmsg();
 729
 730        /* Start watching for new records, if desired. */
 731        if (pstore_update_ms >= 0) {
 732                pstore_timer.expires = jiffies +
 733                        msecs_to_jiffies(pstore_update_ms);
 734                add_timer(&pstore_timer);
 735        }
 736
 737        /*
 738         * Update the module parameter backend, so it is visible
 739         * through /sys/module/pstore/parameters/backend
 740         */
 741        backend = psi->name;
 742
 743        pr_info("Registered %s as persistent store backend\n", psi->name);
 744
 745        module_put(owner);
 746
 747        return 0;
 748}
 749EXPORT_SYMBOL_GPL(pstore_register);
 750
 751void pstore_unregister(struct pstore_info *psi)
 752{
 753        /* Stop timer and make sure all work has finished. */
 754        pstore_update_ms = -1;
 755        del_timer_sync(&pstore_timer);
 756        flush_work(&pstore_work);
 757
 758        if (psi->flags & PSTORE_FLAGS_PMSG)
 759                pstore_unregister_pmsg();
 760        if (psi->flags & PSTORE_FLAGS_FTRACE)
 761                pstore_unregister_ftrace();
 762        if (psi->flags & PSTORE_FLAGS_CONSOLE)
 763                pstore_unregister_console();
 764        if (psi->flags & PSTORE_FLAGS_DMESG)
 765                pstore_unregister_kmsg();
 766
 767        free_buf_for_compression();
 768
 769        psinfo = NULL;
 770        backend = NULL;
 771}
 772EXPORT_SYMBOL_GPL(pstore_unregister);
 773
 774static void decompress_record(struct pstore_record *record)
 775{
 776        int unzipped_len;
 777        char *decompressed;
 778
 779        if (!record->compressed)
 780                return;
 781
  782        /* Only PSTORE_TYPE_DMESG supports compression. */
 783        if (record->type != PSTORE_TYPE_DMESG) {
 784                pr_warn("ignored compressed record type %d\n", record->type);
 785                return;
 786        }
 787
 788        /* No compression method has created the common buffer. */
 789        if (!big_oops_buf) {
 790                pr_warn("no decompression buffer allocated\n");
 791                return;
 792        }
 793
 794        unzipped_len = pstore_decompress(record->buf, big_oops_buf,
 795                                         record->size, big_oops_buf_sz);
 796        if (unzipped_len <= 0) {
 797                pr_err("decompression failed: %d\n", unzipped_len);
 798                return;
 799        }
 800
 801        /* Build new buffer for decompressed contents. */
 802        decompressed = kmalloc(unzipped_len + record->ecc_notice_size,
 803                               GFP_KERNEL);
 804        if (!decompressed) {
 805                pr_err("decompression ran out of memory\n");
 806                return;
 807        }
 808        memcpy(decompressed, big_oops_buf, unzipped_len);
 809
 810        /* Append ECC notice to decompressed buffer. */
 811        memcpy(decompressed + unzipped_len, record->buf + record->size,
 812               record->ecc_notice_size);
 813
  814        /* Swap out compressed contents with decompressed contents. */
 815        kfree(record->buf);
 816        record->buf = decompressed;
 817        record->size = unzipped_len;
 818        record->compressed = false;
 819}
 820
 821/*
 822 * Read all the records from one persistent store backend. Create
 823 * files in our filesystem.  Don't warn about -EEXIST errors
 824 * when we are re-scanning the backing store looking to add new
 825 * error records.
 826 */
 827void pstore_get_backend_records(struct pstore_info *psi,
 828                                struct dentry *root, int quiet)
 829{
 830        int failed = 0;
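             /* Cap the scan so a misbehaving backend cannot loop forever. */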
 831        unsigned int stop_loop = 65536;
 832
 833        if (!psi || !root)
 834                return;
 835
 836        mutex_lock(&psi->read_mutex);
 837        if (psi->open && psi->open(psi))
 838                goto out;
 839
 840        /*
 841         * Backend callback read() allocates record.buf. decompress_record()
 842         * may reallocate record.buf. On success, pstore_mkfile() will keep
 843         * the record.buf, so free it only on failure.
 844         */
 845        for (; stop_loop; stop_loop--) {
 846                struct pstore_record *record;
 847                int rc;
 848
 849                record = kzalloc(sizeof(*record), GFP_KERNEL);
 850                if (!record) {
 851                        pr_err("out of memory creating record\n");
 852                        break;
 853                }
 854                pstore_record_init(record, psi);
 855
 856                record->size = psi->read(record);
 857
 858                /* No more records left in backend? */
 859                if (record->size <= 0) {
 860                        kfree(record);
 861                        break;
 862                }
 863
 864                decompress_record(record);
 865                rc = pstore_mkfile(root, record);
 866                if (rc) {
 867                        /* pstore_mkfile() did not take record, so free it. */
 868                        kfree(record->buf);
 869                        kfree(record);
 870                        if (rc != -EEXIST || !quiet)
 871                                failed++;
 872                }
 873        }
 874        if (psi->close)
 875                psi->close(psi);
 876out:
 877        mutex_unlock(&psi->read_mutex);
 878
 879        if (failed)
 880                pr_warn("failed to create %d record(s) from '%s'\n",
 881                        failed, psi->name);
 882        if (!stop_loop)
 883                pr_err("looping? Too many records seen from '%s'\n",
 884                        psi->name);
 885}
 886
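     /*
      * New records noticed by pstore_dump() set pstore_new_entry; the
      * timer below spots that flag and hands the rescan off to a
      * workqueue, since pstore_get_records() can sleep.
      */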
 887static void pstore_dowork(struct work_struct *work)
 888{
 889        pstore_get_records(1);
 890}
 891
 892static void pstore_timefunc(struct timer_list *unused)
 893{
 894        if (pstore_new_entry) {
 895                pstore_new_entry = 0;
 896                schedule_work(&pstore_work);
 897        }
 898
 899        if (pstore_update_ms >= 0)
 900                mod_timer(&pstore_timer,
 901                          jiffies + msecs_to_jiffies(pstore_update_ms));
 902}
 903
 904module_param(backend, charp, 0444);
 905MODULE_PARM_DESC(backend, "Pstore backend to use");
 906