linux/fs/pstore/ram_core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/pstore_ram.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/page.h>

/**
 * struct persistent_ram_buffer - persistent circular RAM buffer
 *
 * @sig:
 *      signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
 * @start:
 *      offset into @data where the stored bytes begin
 * @size:
 *      number of valid bytes stored in @data
 */
struct persistent_ram_buffer {
        uint32_t    sig;
        atomic_t    start;
        atomic_t    size;
        uint8_t     data[0];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
        return atomic_read(&prz->buffer->size);
}

static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
        return atomic_read(&prz->buffer->start);
}

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
        int old;
        int new;
        unsigned long flags = 0;

        if (!(prz->flags & PRZ_FLAG_NO_LOCK))
                raw_spin_lock_irqsave(&prz->buffer_lock, flags);

        old = atomic_read(&prz->buffer->start);
        new = old + a;
        while (unlikely(new >= prz->buffer_size))
                new -= prz->buffer_size;
        atomic_set(&prz->buffer->start, new);

        if (!(prz->flags & PRZ_FLAG_NO_LOCK))
                raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

        return old;
}

/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
        size_t old;
        size_t new;
        unsigned long flags = 0;

        if (!(prz->flags & PRZ_FLAG_NO_LOCK))
                raw_spin_lock_irqsave(&prz->buffer_lock, flags);

        old = atomic_read(&prz->buffer->size);
        if (old == prz->buffer_size)
                goto exit;

        new = old + a;
        if (new > prz->buffer_size)
                new = prz->buffer_size;
        atomic_set(&prz->buffer->size, new);

exit:
        if (!(prz->flags & PRZ_FLAG_NO_LOCK))
                raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}

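/*
 * Reed-Solomon encode one block: generate ecc_size parity bytes covering
 * the len data bytes and copy them into the caller-supplied ecc buffer.
 */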
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
        uint8_t *data, size_t len, uint8_t *ecc)
{
        int i;

        /* Initialize the parity buffer */
        memset(prz->ecc_info.par, 0,
               prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
        encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
        for (i = 0; i < prz->ecc_info.ecc_size; i++)
                ecc[i] = prz->ecc_info.par[i];
}

static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
        void *data, size_t len, uint8_t *ecc)
{
        int i;

        for (i = 0; i < prz->ecc_info.ecc_size; i++)
                prz->ecc_info.par[i] = ecc[i];
        return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
                                NULL, 0, NULL, 0, NULL);
}

static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
        unsigned int start, unsigned int count)
{
        struct persistent_ram_buffer *buffer = prz->buffer;
        uint8_t *buffer_end = buffer->data + prz->buffer_size;
        uint8_t *block;
        uint8_t *par;
        int ecc_block_size = prz->ecc_info.block_size;
        int ecc_size = prz->ecc_info.ecc_size;
        int size = ecc_block_size;

        if (!ecc_size)
                return;

        block = buffer->data + (start & ~(ecc_block_size - 1));
        par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

        do {
                if (block + ecc_block_size > buffer_end)
                        size = buffer_end - block;
                persistent_ram_encode_rs8(prz, block, size, par);
                block += ecc_block_size;
                par += ecc_size;
        } while (block < buffer->data + start + count);
}

static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
        struct persistent_ram_buffer *buffer = prz->buffer;

        if (!prz->ecc_info.ecc_size)
                return;

        persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
                                  prz->par_header);
}

static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
        struct persistent_ram_buffer *buffer = prz->buffer;
        uint8_t *block;
        uint8_t *par;

        if (!prz->ecc_info.ecc_size)
                return;

        block = buffer->data;
        par = prz->par_buffer;
        while (block < buffer->data + buffer_size(prz)) {
                int numerr;
                int size = prz->ecc_info.block_size;
                if (block + size > buffer->data + prz->buffer_size)
                        size = buffer->data + prz->buffer_size - block;
                numerr = persistent_ram_decode_rs8(prz, block, size, par);
                if (numerr > 0) {
                        pr_devel("error in block %p, %d\n", block, numerr);
                        prz->corrected_bytes += numerr;
                } else if (numerr < 0) {
                        pr_devel("uncorrectable error in block %p\n", block);
                        prz->bad_blocks++;
                }
                block += prz->ecc_info.block_size;
                par += prz->ecc_info.ecc_size;
        }
}

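/*
 * Set up Reed-Solomon ECC for this zone: apply defaults, carve the parity
 * area out of the tail of the buffer, build the RS codec and parity
 * workspace, and verify (and correct) the header already in memory.
 */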
static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
                                   struct persistent_ram_ecc_info *ecc_info)
{
        int numerr;
        struct persistent_ram_buffer *buffer = prz->buffer;
        int ecc_blocks;
        size_t ecc_total;

        if (!ecc_info || !ecc_info->ecc_size)
                return 0;

        prz->ecc_info.block_size = ecc_info->block_size ?: 128;
        prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
        prz->ecc_info.symsize = ecc_info->symsize ?: 8;
        prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

        ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
                                  prz->ecc_info.block_size +
                                  prz->ecc_info.ecc_size);
        ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
        if (ecc_total >= prz->buffer_size) {
                pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
                       __func__, prz->ecc_info.ecc_size,
                       ecc_total, prz->buffer_size);
                return -EINVAL;
        }

        prz->buffer_size -= ecc_total;
        prz->par_buffer = buffer->data + prz->buffer_size;
        prz->par_header = prz->par_buffer +
                          ecc_blocks * prz->ecc_info.ecc_size;

        /*
         * first consecutive root is 0
         * primitive element to generate roots = 1
         */
        prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
                                  0, 1, prz->ecc_info.ecc_size);
        if (prz->rs_decoder == NULL) {
                pr_info("init_rs failed\n");
                return -EINVAL;
        }

        /* allocate workspace instead of using stack VLA */
        prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
                                          sizeof(*prz->ecc_info.par),
                                          GFP_KERNEL);
        if (!prz->ecc_info.par) {
                pr_err("cannot allocate ECC parity workspace\n");
                return -ENOMEM;
        }

        prz->corrected_bytes = 0;
        prz->bad_blocks = 0;

        numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
                                           prz->par_header);
        if (numerr > 0) {
                pr_info("error in header, %d\n", numerr);
                prz->corrected_bytes += numerr;
        } else if (numerr < 0) {
                pr_info("uncorrectable error in header\n");
                prz->bad_blocks++;
        }

        return 0;
}

ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
        char *str, size_t len)
{
        ssize_t ret;

        if (!prz->ecc_info.ecc_size)
                return 0;

        if (prz->corrected_bytes || prz->bad_blocks)
                ret = snprintf(str, len,
                        "\n%d Corrected bytes, %d unrecoverable blocks\n",
                        prz->corrected_bytes, prz->bad_blocks);
        else
                ret = snprintf(str, len, "\nNo errors detected\n");

        return ret;
}

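/*
 * Copy new bytes into the persistent buffer at the given offset and
 * refresh the ECC parity for every block that was touched.
 */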
static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
        const void *s, unsigned int start, unsigned int count)
{
        struct persistent_ram_buffer *buffer = prz->buffer;
        memcpy_toio(buffer->data + start, s, count);
        persistent_ram_update_ecc(prz, start, count);
}

static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
        const void __user *s, unsigned int start, unsigned int count)
{
        struct persistent_ram_buffer *buffer = prz->buffer;
        int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ?
                -EFAULT : 0;
        persistent_ram_update_ecc(prz, start, count);
        return ret;
}

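/*
 * Snapshot the (possibly wrapped) live buffer into prz->old_log as a
 * single linear copy, running ECC correction over the stored data first.
 */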
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
        struct persistent_ram_buffer *buffer = prz->buffer;
        size_t size = buffer_size(prz);
        size_t start = buffer_start(prz);

        if (!size)
                return;

        if (!prz->old_log) {
                persistent_ram_ecc_old(prz);
                prz->old_log = kmalloc(size, GFP_KERNEL);
        }
        if (!prz->old_log) {
                pr_err("failed to allocate buffer\n");
                return;
        }

        prz->old_log_size = size;
        memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
        memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}

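/*
 * Append count bytes to the circular buffer. Writes larger than the buffer
 * keep only the trailing buffer_size bytes; writes that reach the end wrap
 * around to offset 0. Returns the number of bytes accepted (count).
 */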
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
        const void *s, unsigned int count)
{
        int rem;
        int c = count;
        size_t start;

        if (unlikely(c > prz->buffer_size)) {
                s += c - prz->buffer_size;
                c = prz->buffer_size;
        }

        buffer_size_add(prz, c);

        start = buffer_start_add(prz, c);

        rem = prz->buffer_size - start;
        if (unlikely(rem < c)) {
                persistent_ram_update(prz, s, start, rem);
                s += rem;
                c -= rem;
                start = 0;
        }
        persistent_ram_update(prz, s, start, c);

        persistent_ram_update_header_ecc(prz);

        return count;
}

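/*
 * Same as persistent_ram_write(), but the source is a user-space pointer;
 * returns -EFAULT if the copy from user space faults.
 */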
int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
        const void __user *s, unsigned int count)
{
        int rem, ret = 0, c = count;
        size_t start;

        if (unlikely(!access_ok(s, count)))
                return -EFAULT;
        if (unlikely(c > prz->buffer_size)) {
                s += c - prz->buffer_size;
                c = prz->buffer_size;
        }

        buffer_size_add(prz, c);

        start = buffer_start_add(prz, c);

        rem = prz->buffer_size - start;
        if (unlikely(rem < c)) {
                ret = persistent_ram_update_user(prz, s, start, rem);
                s += rem;
                c -= rem;
                start = 0;
        }
        if (likely(!ret))
                ret = persistent_ram_update_user(prz, s, start, c);

        persistent_ram_update_header_ecc(prz);

        return unlikely(ret) ? ret : count;
}

size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
        return prz->old_log_size;
}

void *persistent_ram_old(struct persistent_ram_zone *prz)
{
        return prz->old_log;
}

void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
        kfree(prz->old_log);
        prz->old_log = NULL;
        prz->old_log_size = 0;
}

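/*
 * Discard the buffer contents by resetting start and size to zero, then
 * regenerate the header ECC so the empty header still verifies.
 */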
void persistent_ram_zap(struct persistent_ram_zone *prz)
{
        atomic_set(&prz->buffer->start, 0);
        atomic_set(&prz->buffer->size, 0);
        persistent_ram_update_header_ecc(prz);
}

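/*
 * Map a physical range that is backed by struct pages (i.e. part of
 * system RAM) with vmap(), either uncached or write-combined depending
 * on memtype.
 */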
static void *persistent_ram_vmap(phys_addr_t start, size_t size,
                unsigned int memtype)
{
        struct page **pages;
        phys_addr_t page_start;
        unsigned int page_count;
        pgprot_t prot;
        unsigned int i;
        void *vaddr;

        page_start = start - offset_in_page(start);
        page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

        if (memtype)
                prot = pgprot_noncached(PAGE_KERNEL);
        else
                prot = pgprot_writecombine(PAGE_KERNEL);

        pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                pr_err("%s: Failed to allocate array for %u pages\n",
                       __func__, page_count);
                return NULL;
        }

        for (i = 0; i < page_count; i++) {
                phys_addr_t addr = page_start + i * PAGE_SIZE;
                pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
        }
        vaddr = vmap(pages, page_count, VM_MAP, prot);
        kfree(pages);

        /*
         * Since vmap() uses page granularity, we must add the offset
         * into the page here, to get the byte granularity address
         * into the mapping to represent the actual "start" location.
         */
        return vaddr + offset_in_page(start);
}

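/*
 * Map a physical range that is not backed by struct pages (e.g. a reserved
 * carveout outside kernel-managed RAM) by claiming the region and using
 * ioremap() or ioremap_wc().
 */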
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
                unsigned int memtype, char *label)
{
        void *va;

        if (!request_mem_region(start, size, label ?: "ramoops")) {
                pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
                        label ?: "ramoops",
                        (unsigned long long)size, (unsigned long long)start);
                return NULL;
        }

        if (memtype)
                va = ioremap(start, size);
        else
                va = ioremap_wc(start, size);

        /*
         * Since request_mem_region() and ioremap() are byte-granularity,
         * there is no need to handle anything special like we do for the
         * vmap() case in persistent_ram_vmap() above.
         */
        return va;
}

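/*
 * Map the backing memory for a zone, choosing vmap() for pfn_valid()
 * ranges and ioremap() otherwise, and carve the header off the front
 * of the mapping.
 */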
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
                struct persistent_ram_zone *prz, int memtype)
{
        prz->paddr = start;
        prz->size = size;

        if (pfn_valid(start >> PAGE_SHIFT))
                prz->vaddr = persistent_ram_vmap(start, size, memtype);
        else
                prz->vaddr = persistent_ram_iomap(start, size, memtype,
                                                  prz->label);

        if (!prz->vaddr) {
                pr_err("%s: Failed to map 0x%llx bytes at 0x%llx\n", __func__,
                        (unsigned long long)size, (unsigned long long)start);
                return -ENOMEM;
        }

        prz->buffer = prz->vaddr;
        prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

        return 0;
}

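/*
 * Initialize ECC and examine what is already in the zone: save valid old
 * contents for later reading, and reset the area when the signature or
 * counters are invalid or PRZ_FLAG_ZAP_OLD asks for a fresh start.
 */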
static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
                                    struct persistent_ram_ecc_info *ecc_info)
{
        int ret;
        bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);

        ret = persistent_ram_init_ecc(prz, ecc_info);
        if (ret) {
                pr_warn("ECC failed %s\n", prz->label);
                return ret;
        }

        sig ^= PERSISTENT_RAM_SIG;

        if (prz->buffer->sig == sig) {
                if (buffer_size(prz) == 0) {
                        pr_debug("found existing empty buffer\n");
                        return 0;
                }

                if (buffer_size(prz) > prz->buffer_size ||
                    buffer_start(prz) > buffer_size(prz)) {
                        pr_info("found existing invalid buffer, size %zu, start %zu\n",
                                buffer_size(prz), buffer_start(prz));
                        zap = true;
                } else {
                        pr_debug("found existing buffer, size %zu, start %zu\n",
                                 buffer_size(prz), buffer_start(prz));
                        persistent_ram_save_old(prz);
                }
        } else {
                pr_debug("no valid data in buffer (sig = 0x%08x)\n",
                         prz->buffer->sig);
                prz->buffer->sig = sig;
                zap = true;
        }

        /* Reset missing, invalid, or single-use memory area. */
        if (zap)
                persistent_ram_zap(prz);

        return 0;
}

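/*
 * Tear down a zone: unmap it the same way it was mapped, release the
 * Reed-Solomon codec and parity workspace, and free the saved old log,
 * the label, and the zone itself.
 */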
void persistent_ram_free(struct persistent_ram_zone *prz)
{
        if (!prz)
                return;

        if (prz->vaddr) {
                if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
                        /* We must vunmap() at page-granularity. */
                        vunmap(prz->vaddr - offset_in_page(prz->paddr));
                } else {
                        iounmap(prz->vaddr);
                        release_mem_region(prz->paddr, prz->size);
                }
                prz->vaddr = NULL;
        }
        if (prz->rs_decoder) {
                free_rs(prz->rs_decoder);
                prz->rs_decoder = NULL;
        }
        kfree(prz->ecc_info.par);
        prz->ecc_info.par = NULL;

        persistent_ram_free_old(prz);
        kfree(prz->label);
        kfree(prz);
}

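/*
 * Allocate a new zone covering size bytes at physical address start, map
 * it, and recover or reset any contents found there. The zone takes
 * ownership of the label string. Returns an ERR_PTR() on failure.
 */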
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
                        u32 sig, struct persistent_ram_ecc_info *ecc_info,
                        unsigned int memtype, u32 flags, char *label)
{
        struct persistent_ram_zone *prz;
        int ret = -ENOMEM;

        prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
        if (!prz) {
                pr_err("failed to allocate persistent ram zone\n");
                goto err;
        }

        /* Initialize general buffer state. */
        raw_spin_lock_init(&prz->buffer_lock);
        prz->flags = flags;
        prz->label = label;

        ret = persistent_ram_buffer_map(start, size, prz, memtype);
        if (ret)
                goto err;

        ret = persistent_ram_post_init(prz, sig, ecc_info);
        if (ret)
                goto err;

        pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
                prz->label, prz->size, (unsigned long long)prz->paddr,
                sizeof(*prz->buffer), prz->buffer_size,
                prz->size - sizeof(*prz->buffer) - prz->buffer_size,
                prz->ecc_info.ecc_size, prz->ecc_info.block_size);

        return prz;
err:
        persistent_ram_free(prz);
        return ERR_PTR(ret);
}