linux/arch/x86/platform/intel-quark/imr.c
/**
 * imr.c -- Intel Isolated Memory Region driver
 *
 * Copyright(c) 2013 Intel Corporation.
 * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
 *
 * IMR registers define an isolated region of memory that can
 * be masked to prohibit certain system agents from accessing memory.
 * When a device behind a masked port performs an access - snooped or
 * not, an IMR may optionally prevent that transaction from changing
 * the state of memory or from getting correct data in response to the
 * operation.
 *
 * Write data will be dropped and reads will return 0xFFFFFFFF. The
 * system will reset and the system BIOS will print out an error message
 * to inform the user that an IMR has been violated.
 *
 * This code is based on the Linux MTRR code and reference code from
 * Intel's Quark BSP EFI, Linux and grub code.
 *
 * See quark-x1000-datasheet.pdf for register definitions.
 * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm-generic/sections.h>
#include <asm/cpu_device_id.h>
#include <asm/imr.h>
#include <asm/iosf_mbi.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>

struct imr_device {
        struct dentry   *file;
        bool            init;
        struct mutex    lock;
        int             max_imr;
        int             reg_base;
};

static struct imr_device imr_dev;

/*
 * IMR read/write mask control registers.
 * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
 * bit definitions.
 *
 * addr_lo
 * 31           Lock bit
 * 30:24        Reserved
 * 23:2         1 KiB aligned lo address
 * 1:0          Reserved
 *
 * addr_hi
 * 31:24        Reserved
 * 23:2         1 KiB aligned hi address
 * 1:0          Reserved
 */
#define IMR_LOCK        BIT(31)

struct imr_regs {
        u32 addr_lo;
        u32 addr_hi;
        u32 rmask;
        u32 wmask;
};

#define IMR_NUM_REGS    (sizeof(struct imr_regs)/sizeof(u32))
#define IMR_SHIFT       8
#define imr_to_phys(x)  ((x) << IMR_SHIFT)
#define phys_to_imr(x)  ((x) >> IMR_SHIFT)
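
/*
 * A worked example of the address conversion above (illustrative only):
 * with IMR_SHIFT of 8, a 1 KiB aligned physical address such as 0x00100000
 * is programmed as phys_to_imr(0x00100000) == 0x1000, and reading that
 * register value back gives imr_to_phys(0x1000) == 0x00100000. Each IMR
 * occupies IMR_NUM_REGS (4) consecutive IOSF-MBI registers: addr_lo,
 * addr_hi, rmask and wmask, in that order.
 */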

/**
 * imr_is_enabled - true if an IMR is enabled, false otherwise.
 *
 * Determines if an IMR is enabled based on address range and read/write
 * mask. An IMR with its address range set to zero and its read/write
 * access masks set to all is considered to be disabled. An IMR in any
 * other state - for example with the address range zeroed but the access
 * masks not set to all - is considered to be enabled. This definition of
 * disabled is how firmware switches off an IMR and is maintained in the
 * kernel for consistency.
 *
 * @imr:        pointer to IMR descriptor.
 * @return:     true if the IMR is enabled, false if disabled.
 */
static inline int imr_is_enabled(struct imr_regs *imr)
{
        return !(imr->rmask == IMR_READ_ACCESS_ALL &&
                 imr->wmask == IMR_WRITE_ACCESS_ALL &&
                 imr_to_phys(imr->addr_lo) == 0 &&
                 imr_to_phys(imr->addr_hi) == 0);
}
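
/*
 * Illustrative note: the "disabled" sentinel tested above is exactly what
 * __imr_remove_range() and firmware write when tearing an IMR down:
 * addr_lo == 0, addr_hi == 0, rmask == IMR_READ_ACCESS_ALL and
 * wmask == IMR_WRITE_ACCESS_ALL. An IMR with both addresses zero but,
 * say, wmask == IMR_CPU still counts as enabled.
 */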

/**
 * imr_read - read an IMR at a given index.
 *
 * Requires caller to hold imr mutex.
 *
 * @idev:       pointer to imr_device structure.
 * @imr_id:     IMR entry to read.
 * @imr:        IMR structure representing address and access masks.
 * @return:     0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
{
        u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
        int ret;

        ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo);
        if (ret)
                return ret;

        ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi);
        if (ret)
                return ret;

        ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask);
        if (ret)
                return ret;

        return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->wmask);
}

/**
 * imr_write - write an IMR at a given index.
 *
 * Requires caller to hold imr mutex.
 * Note lock bits need to be written independently of address bits.
 *
 * @idev:       pointer to imr_device structure.
 * @imr_id:     IMR entry to write.
 * @imr:        IMR structure representing address and access masks.
 * @return:     0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
{
        unsigned long flags;
        u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
        int ret;

        local_irq_save(flags);

        ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo);
        if (ret)
                goto failed;

        ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi);
        if (ret)
                goto failed;

        ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask);
        if (ret)
                goto failed;

        ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->wmask);
        if (ret)
                goto failed;

        local_irq_restore(flags);
        return 0;
failed:
        /*
         * If writing to the IOSF failed then we're in an unknown state,
         * likely a very bad state. An IMR in an invalid state will almost
         * certainly lead to a memory access violation.
         */
        local_irq_restore(flags);
        WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
             imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);

        return ret;
}

/**
 * imr_dbgfs_state_show - print state of IMR registers.
 *
 * @s:          pointer to seq_file for output.
 * @unused:     unused parameter.
 * @return:     0 on success or error code passed from iosf_mbi on failure.
 */
static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
{
        phys_addr_t base;
        phys_addr_t end;
        int i;
        struct imr_device *idev = s->private;
        struct imr_regs imr;
        size_t size;
        int ret = -ENODEV;

        mutex_lock(&idev->lock);

        for (i = 0; i < idev->max_imr; i++) {

                ret = imr_read(idev, i, &imr);
                if (ret)
                        break;

                /*
                 * Remember to add IMR_ALIGN bytes to the size to account for
                 * the inherent IMR_ALIGN bytes contained in the masked-away
                 * lower ten bits.
                 */
                if (imr_is_enabled(&imr)) {
                        base = imr_to_phys(imr.addr_lo);
                        end = imr_to_phys(imr.addr_hi) + IMR_MASK;
                        size = end - base + 1;
                } else {
                        base = 0;
                        end = 0;
                        size = 0;
                }
                seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
                           "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
                           &base, &end, size, imr.rmask, imr.wmask,
                           imr_is_enabled(&imr) ? "enabled " : "disabled",
                           imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
        }

        mutex_unlock(&idev->lock);
        return ret;
}
DEFINE_SHOW_ATTRIBUTE(imr_dbgfs_state);

/**
 * imr_debugfs_register - register debugfs hooks.
 *
 * @idev:       pointer to imr_device structure.
 * @return:     0 on success - errno on failure.
 */
static int imr_debugfs_register(struct imr_device *idev)
{
        idev->file = debugfs_create_file("imr_state", 0444, NULL, idev,
                                         &imr_dbgfs_state_fops);
        return PTR_ERR_OR_ZERO(idev->file);
}
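
/*
 * Note: with a NULL parent the "imr_state" file above lands in the debugfs
 * root, so it is typically visible as /sys/kernel/debug/imr_state once
 * debugfs is mounted.
 */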

/**
 * imr_check_params - check passed address range IMR alignment and non-zero size
 *
 * @base:       base address of intended IMR.
 * @size:       size of intended IMR.
 * @return:     zero on valid range, -EINVAL on unaligned base/size or zero size.
 */
static int imr_check_params(phys_addr_t base, size_t size)
{
        if ((base & IMR_MASK) || (size & IMR_MASK)) {
                pr_err("base %pa size 0x%08zx must align to 1KiB\n",
                        &base, size);
                return -EINVAL;
        }
        if (size == 0)
                return -EINVAL;

        return 0;
}

/**
 * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
 *
 * IMR addr_hi has a built-in offset of plus IMR_ALIGN (0x400) bytes from the
 * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
 * as a result.
 *
 * @size:       input size bytes.
 * @return:     reduced size.
 */
static inline size_t imr_raw_size(size_t size)
{
        return size - IMR_ALIGN;
}
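
/*
 * Worked example (illustrative only): to cover base 0x00100000 with size
 * 0x1000, imr_raw_size() gives 0xc00, so addr_lo is programmed with
 * phys_to_imr(0x00100000) and addr_hi with phys_to_imr(0x00100c00). The
 * hardware then protects 0x00100000 - 0x00100fff, since addr_hi implicitly
 * covers the additional IMR_ALIGN bytes above the value in the register.
 */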

/**
 * imr_address_overlap - detects an address overlap.
 *
 * @addr:       address to check against an existing IMR.
 * @imr:        imr being checked.
 * @return:     true for overlap, false for no overlap.
 */
static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
{
        return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
}

/**
 * imr_add_range - add an Isolated Memory Region.
 *
 * @base:       physical base address of region aligned to 1KiB.
 * @size:       physical size of region in bytes, must be aligned to 1KiB.
 * @rmask:      read access mask.
 * @wmask:      write access mask.
 * @return:     zero on success or negative value indicating error.
 */
int imr_add_range(phys_addr_t base, size_t size,
                  unsigned int rmask, unsigned int wmask)
{
        phys_addr_t end;
        unsigned int i;
        struct imr_device *idev = &imr_dev;
        struct imr_regs imr;
        size_t raw_size;
        int reg;
        int ret;

        if (WARN_ONCE(idev->init == false, "driver not initialized"))
                return -ENODEV;

        ret = imr_check_params(base, size);
        if (ret)
                return ret;

        /* Tweak the size value. */
        raw_size = imr_raw_size(size);
        end = base + raw_size;

        /*
         * Check for reserved IMR value common to firmware, kernel and grub
         * indicating a disabled IMR.
         */
        imr.addr_lo = phys_to_imr(base);
        imr.addr_hi = phys_to_imr(end);
        imr.rmask = rmask;
        imr.wmask = wmask;
        if (!imr_is_enabled(&imr))
                return -ENOTSUPP;

        mutex_lock(&idev->lock);

        /*
         * Find a free IMR while checking for an existing overlapping range.
         * Note there's no restriction in silicon to prevent IMR overlaps.
         * For the sake of simplicity and ease in defining/debugging an IMR
         * memory map we exclude IMR overlaps.
         */
        reg = -1;
        for (i = 0; i < idev->max_imr; i++) {
                ret = imr_read(idev, i, &imr);
                if (ret)
                        goto failed;

                /* Find overlap @ base or end of requested range. */
                ret = -EINVAL;
                if (imr_is_enabled(&imr)) {
                        if (imr_address_overlap(base, &imr))
                                goto failed;
                        if (imr_address_overlap(end, &imr))
                                goto failed;
                } else {
                        reg = i;
                }
        }

        /* Error out if we have no free IMR entries. */
        if (reg == -1) {
                ret = -ENOMEM;
                goto failed;
        }

        pr_debug("add %d phys %pa-%pa size %zx rmask 0x%08x wmask 0x%08x\n",
                 reg, &base, &end, raw_size, rmask, wmask);

        /* Enable IMR at specified range and access mask. */
        imr.addr_lo = phys_to_imr(base);
        imr.addr_hi = phys_to_imr(end);
        imr.rmask = rmask;
        imr.wmask = wmask;

        ret = imr_write(idev, reg, &imr);
        if (ret < 0) {
                /*
                 * In the highly unlikely event iosf_mbi_write() failed,
                 * attempt to roll back the IMR setup, skipping the trapping
                 * of further IOSF write failures.
                 */
                imr.addr_lo = 0;
                imr.addr_hi = 0;
                imr.rmask = IMR_READ_ACCESS_ALL;
                imr.wmask = IMR_WRITE_ACCESS_ALL;
                imr_write(idev, reg, &imr);
        }
failed:
        mutex_unlock(&idev->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(imr_add_range);
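
/*
 * Usage sketch (illustrative only, not part of this driver): a caller that
 * wants to stop all non-CPU agents from touching a 1 KiB aligned, 1 KiB
 * sized physical region could do something like the following. The buffer
 * shown here is hypothetical; only the imr_add_range()/imr_remove_range()
 * calls reflect this file's API.
 *
 *      phys_addr_t buf_phys = ...;     // 1 KiB aligned physical address
 *      int err;
 *
 *      err = imr_add_range(buf_phys, SZ_1K, IMR_CPU, IMR_CPU);
 *      if (err)
 *              return err;
 *      ...
 *      err = imr_remove_range(buf_phys, SZ_1K);
 */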

/**
 * __imr_remove_range - delete an Isolated Memory Region.
 *
 * This function allows you to delete an IMR by its index specified by reg or
 * by address range specified by base and size respectively. If you specify an
 * index on its own the base and size parameters are ignored.
 * __imr_remove_range(0, base, size); delete IMR at index 0, base/size ignored.
 * __imr_remove_range(-1, base, size); delete IMR from base to base+size.
 *
 * @reg:        imr index to remove.
 * @base:       physical base address of region aligned to 1 KiB.
 * @size:       physical size of region in bytes aligned to 1 KiB.
 * @return:     -EINVAL on invalid range or out of range id
 *              -ENODEV if reg is valid but no IMR exists or is locked
 *              0 on success.
 */
static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
{
        phys_addr_t end;
        bool found = false;
        unsigned int i;
        struct imr_device *idev = &imr_dev;
        struct imr_regs imr;
        size_t raw_size;
        int ret = 0;

        if (WARN_ONCE(idev->init == false, "driver not initialized"))
                return -ENODEV;

        /*
         * Validate address range if deleting by address, else we are
         * deleting by index where base and size will be ignored.
         */
        if (reg == -1) {
                ret = imr_check_params(base, size);
                if (ret)
                        return ret;
        }

        /* Tweak the size value. */
        raw_size = imr_raw_size(size);
        end = base + raw_size;

        mutex_lock(&idev->lock);

        if (reg >= 0) {
                /* If a specific IMR is given try to use it. */
                ret = imr_read(idev, reg, &imr);
                if (ret)
                        goto failed;

                if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
                        ret = -ENODEV;
                        goto failed;
                }
                found = true;
        } else {
                /* Search for match based on address range. */
                for (i = 0; i < idev->max_imr; i++) {
                        ret = imr_read(idev, i, &imr);
                        if (ret)
                                goto failed;

                        if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
                                continue;

                        if ((imr_to_phys(imr.addr_lo) == base) &&
                            (imr_to_phys(imr.addr_hi) == end)) {
                                found = true;
                                reg = i;
                                break;
                        }
                }
        }

        if (!found) {
                ret = -ENODEV;
                goto failed;
        }

        pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);

        /* Tear down the IMR. */
        imr.addr_lo = 0;
        imr.addr_hi = 0;
        imr.rmask = IMR_READ_ACCESS_ALL;
        imr.wmask = IMR_WRITE_ACCESS_ALL;

        ret = imr_write(idev, reg, &imr);

failed:
        mutex_unlock(&idev->lock);
        return ret;
}

/**
 * imr_remove_range - delete an Isolated Memory Region by address
 *
 * This function allows you to delete an IMR by an address range specified
 * by base and size respectively.
 * imr_remove_range(base, size); delete IMR from base to base+size.
 *
 * @base:       physical base address of region aligned to 1 KiB.
 * @size:       physical size of region in bytes aligned to 1 KiB.
 * @return:     -EINVAL on invalid range
 *              -ENODEV if no matching IMR exists or it is locked
 *              0 on success.
 */
int imr_remove_range(phys_addr_t base, size_t size)
{
        return __imr_remove_range(-1, base, size);
}
EXPORT_SYMBOL_GPL(imr_remove_range);

/**
 * imr_clear - delete an Isolated Memory Region by index
 *
 * This function allows you to delete an IMR by its index. Useful for
 * initial sanitization of the IMR address map.
 * imr_clear(reg); delete the IMR at index reg.
 *
 * @reg:        imr index to remove.
 * @return:     -EINVAL on invalid range or out of range id
 *              -ENODEV if reg is valid but no IMR exists or is locked
 *              0 on success.
 */
static inline int imr_clear(int reg)
{
        return __imr_remove_range(reg, 0, 0);
}

/**
 * imr_fixup_memmap - Tear down IMRs used during bootup.
 *
 * BIOS and Grub both set up IMRs around the compressed kernel and initrd
 * memory that need to be removed before the kernel hands out one of the
 * IMR-encased addresses to a downstream DMA agent such as the SD or
 * Ethernet controller. IMRs on Galileo are set up to immediately reset the
 * system on violation. As a result, if you're running a root filesystem
 * from SD you'll need the boot-time IMRs torn down or you'll find
 * seemingly random resets when using your filesystem.
 *
 * @idev:       pointer to imr_device structure.
 */
static void __init imr_fixup_memmap(struct imr_device *idev)
{
        phys_addr_t base = virt_to_phys(&_text);
        size_t size = virt_to_phys(&__end_rodata) - base;
        unsigned long start, end;
        int i;
        int ret;

        /* Tear down all existing unlocked IMRs. */
        for (i = 0; i < idev->max_imr; i++)
                imr_clear(i);

        start = (unsigned long)_text;
        end = (unsigned long)__end_rodata - 1;

        /*
         * Set up an unlocked IMR around the physical extent of the kernel
         * from the beginning of the .text section to the end of the
         * .rodata section as one physically contiguous block.
         *
         * We don't round up @size since it is already PAGE_SIZE aligned.
         * See vmlinux.lds.S for details.
         */
        ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
        if (ret < 0) {
                pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
                        size / 1024, start, end);
        } else {
                pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
                        size / 1024, start, end);
        }
}

static const struct x86_cpu_id imr_ids[] __initconst = {
        { X86_VENDOR_INTEL, 5, 9 },     /* Intel Quark SoC X1000. */
        {}
};

/**
 * imr_init - entry point for IMR driver.
 *
 * @return:     -ENODEV if there is no IMR support, 0 if good to go.
 */
static int __init imr_init(void)
{
        struct imr_device *idev = &imr_dev;
        int ret;

        if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
                return -ENODEV;

        idev->max_imr = QUARK_X1000_IMR_MAX;
        idev->reg_base = QUARK_X1000_IMR_REGBASE;
        idev->init = true;

        mutex_init(&idev->lock);
        ret = imr_debugfs_register(idev);
        if (ret != 0)
                pr_warn("debugfs register failed!\n");
        imr_fixup_memmap(idev);
        return 0;
}
device_initcall(imr_init);