linux/drivers/mtd/spi-nor/intel-spi.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel PCH/PCU SPI flash driver.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/platform_data/intel-spi.h>

#include "intel-spi.h"

/* Offsets are from @ispi->base */
#define BFPREG                          0x00

#define HSFSTS_CTL                      0x04
#define HSFSTS_CTL_FSMIE                BIT(31)
#define HSFSTS_CTL_FDBC_SHIFT           24
#define HSFSTS_CTL_FDBC_MASK            (0x3f << HSFSTS_CTL_FDBC_SHIFT)

#define HSFSTS_CTL_FCYCLE_SHIFT         17
#define HSFSTS_CTL_FCYCLE_MASK          (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
/* HW sequencer opcodes */
#define HSFSTS_CTL_FCYCLE_READ          (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRITE         (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE         (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K     (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID          (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR          (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR          (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)

#define HSFSTS_CTL_FGO                  BIT(16)
#define HSFSTS_CTL_FLOCKDN              BIT(15)
#define HSFSTS_CTL_FDV                  BIT(14)
#define HSFSTS_CTL_SCIP                 BIT(5)
#define HSFSTS_CTL_AEL                  BIT(2)
#define HSFSTS_CTL_FCERR                BIT(1)
#define HSFSTS_CTL_FDONE                BIT(0)

#define FADDR                           0x08
#define DLOCK                           0x0c
#define FDATA(n)                        (0x10 + ((n) * 4))

#define FRACC                           0x50

#define FREG(n)                         (0x54 + ((n) * 4))
#define FREG_BASE_MASK                  0x3fff
#define FREG_LIMIT_SHIFT                16
#define FREG_LIMIT_MASK                 (0x03fff << FREG_LIMIT_SHIFT)

/* Offset is from @ispi->pregs */
#define PR(n)                           ((n) * 4)
#define PR_WPE                          BIT(31)
#define PR_LIMIT_SHIFT                  16
#define PR_LIMIT_MASK                   (0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE                          BIT(15)
#define PR_BASE_MASK                    0x3fff

/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL                      0x00
#define SSFSTS_CTL_FSMIE                BIT(23)
#define SSFSTS_CTL_DS                   BIT(22)
#define SSFSTS_CTL_DBC_SHIFT            16
#define SSFSTS_CTL_SPOP                 BIT(11)
#define SSFSTS_CTL_ACS                  BIT(10)
#define SSFSTS_CTL_SCGO                 BIT(9)
#define SSFSTS_CTL_COP_SHIFT            12
#define SSFSTS_CTL_FRS                  BIT(7)
#define SSFSTS_CTL_DOFRS                BIT(6)
#define SSFSTS_CTL_AEL                  BIT(4)
#define SSFSTS_CTL_FCERR                BIT(3)
#define SSFSTS_CTL_FDONE                BIT(2)
#define SSFSTS_CTL_SCIP                 BIT(0)

#define PREOP_OPTYPE                    0x04
#define OPMENU0                         0x08
#define OPMENU1                         0x0c

#define OPTYPE_READ_NO_ADDR             0
#define OPTYPE_WRITE_NO_ADDR            1
#define OPTYPE_READ_WITH_ADDR           2
#define OPTYPE_WRITE_WITH_ADDR          3

/* CPU specifics */
#define BYT_PR                          0x74
#define BYT_SSFSTS_CTL                  0x90
#define BYT_BCR                         0xfc
#define BYT_BCR_WPD                     BIT(0)
#define BYT_FREG_NUM                    5
#define BYT_PR_NUM                      5

#define LPT_PR                          0x74
#define LPT_SSFSTS_CTL                  0x90
#define LPT_FREG_NUM                    5
#define LPT_PR_NUM                      5

#define BXT_PR                          0x84
#define BXT_SSFSTS_CTL                  0xa0
#define BXT_FREG_NUM                    12
#define BXT_PR_NUM                      6

#define LVSCC                           0xc4
#define UVSCC                           0xc8
#define ERASE_OPCODE_SHIFT              8
#define ERASE_OPCODE_MASK               (0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT          16
#define ERASE_64K_OPCODE_MASK           (0xff << ERASE_64K_OPCODE_SHIFT)

#define INTEL_SPI_TIMEOUT               5000 /* ms */
#define INTEL_SPI_FIFO_SZ               64

/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @nor: SPI NOR layer structure
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @nregions: Maximum number of regions
 * @pr_num: Maximum number of protected range registers
 * @writeable: Is the chip writeable
 * @locked: Is SPI setting locked
 * @swseq_reg: Use SW sequencer in register reads/writes
 * @swseq_erase: Use SW sequencer in erase operation
 * @erase_64k: 64k erase supported
 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
 * @opcodes: Opcodes which are supported. These are programmed by the BIOS
 *           before it locks down the controller.
 */
struct intel_spi {
        struct device *dev;
        const struct intel_spi_boardinfo *info;
        struct spi_nor nor;
        void __iomem *base;
        void __iomem *pregs;
        void __iomem *sregs;
        size_t nregions;
        size_t pr_num;
        bool writeable;
        bool locked;
        bool swseq_reg;
        bool swseq_erase;
        bool erase_64k;
        u8 atomic_preopcode;
        u8 opcodes[8];
};

static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");

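/* Dump the controller registers for debugging purposes */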
static void intel_spi_dump_regs(struct intel_spi *ispi)
{
        u32 value;
        int i;

        dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

        value = readl(ispi->base + HSFSTS_CTL);
        dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
        if (value & HSFSTS_CTL_FLOCKDN)
                dev_dbg(ispi->dev, "-> Locked\n");

        dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
        dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

        for (i = 0; i < 16; i++)
                dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
                        i, readl(ispi->base + FDATA(i)));

        dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

        for (i = 0; i < ispi->nregions; i++)
                dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
                        readl(ispi->base + FREG(i)));
        for (i = 0; i < ispi->pr_num; i++)
                dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
                        readl(ispi->pregs + PR(i)));

        value = readl(ispi->sregs + SSFSTS_CTL);
        dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
        dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
                readl(ispi->sregs + PREOP_OPTYPE));
        dev_dbg(ispi->dev, "OPMENU0=0x%08x\n", readl(ispi->sregs + OPMENU0));
        dev_dbg(ispi->dev, "OPMENU1=0x%08x\n", readl(ispi->sregs + OPMENU1));

        if (ispi->info->type == INTEL_SPI_BYT)
                dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));

        dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
        dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));

        dev_dbg(ispi->dev, "Protected regions:\n");
        for (i = 0; i < ispi->pr_num; i++) {
                u32 base, limit;

                value = readl(ispi->pregs + PR(i));
                if (!(value & (PR_WPE | PR_RPE)))
                        continue;

                limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
                base = value & PR_BASE_MASK;

                dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
                         i, base << 12, (limit << 12) | 0xfff,
                         value & PR_WPE ? 'W' : '.',
                         value & PR_RPE ? 'R' : '.');
        }

        dev_dbg(ispi->dev, "Flash regions:\n");
        for (i = 0; i < ispi->nregions; i++) {
                u32 region, base, limit;

                region = readl(ispi->base + FREG(i));
                base = region & FREG_BASE_MASK;
                limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

                if (base >= limit || (i > 0 && limit == 0))
                        dev_dbg(ispi->dev, " %02d disabled\n", i);
                else
                        dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
                                 i, base << 12, (limit << 12) | 0xfff);
        }

        dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
                ispi->swseq_reg ? 'S' : 'H');
        dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
                ispi->swseq_erase ? 'S' : 'H');
}

/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
{
        size_t bytes;
        int i = 0;

        if (size > INTEL_SPI_FIFO_SZ)
                return -EINVAL;

        while (size > 0) {
                bytes = min_t(size_t, size, 4);
                memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
                size -= bytes;
                buf += bytes;
                i++;
        }

        return 0;
}

/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
                                 size_t size)
{
        size_t bytes;
        int i = 0;

        if (size > INTEL_SPI_FIFO_SZ)
                return -EINVAL;

        while (size > 0) {
                bytes = min_t(size_t, size, 4);
                memcpy_toio(ispi->base + FDATA(i), buf, bytes);
                size -= bytes;
                buf += bytes;
                i++;
        }

        return 0;
}

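/* Polls HSFSTS_CTL until the HW sequencer cycle in progress (SCIP) clears */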
static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
        u32 val;

        return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
                                  !(val & HSFSTS_CTL_SCIP), 40,
                                  INTEL_SPI_TIMEOUT * 1000);
}

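/* Polls SSFSTS_CTL until the SW sequencer cycle in progress (SCIP) clears */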
static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
        u32 val;

        return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
                                  !(val & SSFSTS_CTL_SCIP), 40,
                                  INTEL_SPI_TIMEOUT * 1000);
}

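/*
 * Set up the controller according to its type: locate the SW sequencer and
 * protected range registers, decide between HW and SW sequencing for
 * register and erase cycles, and read back the opcodes the BIOS programmed
 * before it locked down the controller.
 */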
static int intel_spi_init(struct intel_spi *ispi)
{
        u32 opmenu0, opmenu1, lvscc, uvscc, val;
        int i;

        switch (ispi->info->type) {
        case INTEL_SPI_BYT:
                ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
                ispi->pregs = ispi->base + BYT_PR;
                ispi->nregions = BYT_FREG_NUM;
                ispi->pr_num = BYT_PR_NUM;
                ispi->swseq_reg = true;

                if (writeable) {
                        /* Disable write protection */
                        val = readl(ispi->base + BYT_BCR);
                        if (!(val & BYT_BCR_WPD)) {
                                val |= BYT_BCR_WPD;
                                writel(val, ispi->base + BYT_BCR);
                                val = readl(ispi->base + BYT_BCR);
                        }

                        ispi->writeable = !!(val & BYT_BCR_WPD);
                }

                break;

        case INTEL_SPI_LPT:
                ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
                ispi->pregs = ispi->base + LPT_PR;
                ispi->nregions = LPT_FREG_NUM;
                ispi->pr_num = LPT_PR_NUM;
                ispi->swseq_reg = true;
                break;

        case INTEL_SPI_BXT:
                ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
                ispi->pregs = ispi->base + BXT_PR;
                ispi->nregions = BXT_FREG_NUM;
                ispi->pr_num = BXT_PR_NUM;
                ispi->erase_64k = true;
                break;

        default:
                return -EINVAL;
        }

        /* Disable #SMI generation from HW sequencer */
        val = readl(ispi->base + HSFSTS_CTL);
        val &= ~HSFSTS_CTL_FSMIE;
        writel(val, ispi->base + HSFSTS_CTL);

        /*
         * Determine whether erase operation should use HW or SW sequencer.
         *
         * The HW sequencer has a predefined list of opcodes, with only the
         * erase opcode being programmable in LVSCC and UVSCC registers.
         * If these registers don't contain a valid erase opcode, erase
         * cannot be done using HW sequencer.
         */
        lvscc = readl(ispi->base + LVSCC);
        uvscc = readl(ispi->base + UVSCC);
        if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
                ispi->swseq_erase = true;
        /* SPI controller on Intel BXT supports 64K erase opcode */
        if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
                if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
                    !(uvscc & ERASE_64K_OPCODE_MASK))
                        ispi->erase_64k = false;

        /*
         * Some controllers can only do basic operations using hardware
         * sequencer. All other operations are supposed to be carried out
         * using software sequencer.
         */
        if (ispi->swseq_reg) {
                /* Disable #SMI generation from SW sequencer */
                val = readl(ispi->sregs + SSFSTS_CTL);
                val &= ~SSFSTS_CTL_FSMIE;
                writel(val, ispi->sregs + SSFSTS_CTL);
        }

        /* Check controller's lock status */
        val = readl(ispi->base + HSFSTS_CTL);
        ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);

        if (ispi->locked) {
                /*
                 * BIOS programs allowed opcodes and then locks down the
                 * register. So read back what opcodes it decided to support.
                 * That's the set we are going to support as well.
                 */
                opmenu0 = readl(ispi->sregs + OPMENU0);
                opmenu1 = readl(ispi->sregs + OPMENU1);

                if (opmenu0 && opmenu1) {
                        for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
                                ispi->opcodes[i] = opmenu0 >> i * 8;
                                ispi->opcodes[i + 4] = opmenu1 >> i * 8;
                        }
                }
        }

        intel_spi_dump_regs(ispi);

        return 0;
}

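/*
 * Returns the index of @opcode in the OPMENU registers, or -EINVAL if the
 * controller is locked and the opcode is not among those allowed by the
 * BIOS. If the controller is unlocked, the opcode and its cycle type are
 * programmed into slot 0.
 */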
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
        int i;
        int preop;

        if (ispi->locked) {
                for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
                        if (ispi->opcodes[i] == opcode)
                                return i;

                return -EINVAL;
        }

        /* The lock is off, so just use index 0 */
        writel(opcode, ispi->sregs + OPMENU0);
        preop = readw(ispi->sregs + PREOP_OPTYPE);
        writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);

        return 0;
}

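/*
 * Runs a single register cycle (RDID/WRSR/RDSR) on the HW sequencer and
 * waits for it to complete. Any data is transferred through the FDATA FIFO
 * separately by the caller.
 */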
static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
{
        u32 val, status;
        int ret;

        val = readl(ispi->base + HSFSTS_CTL);
        val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);

        switch (opcode) {
        case SPINOR_OP_RDID:
                val |= HSFSTS_CTL_FCYCLE_RDID;
                break;
        case SPINOR_OP_WRSR:
                val |= HSFSTS_CTL_FCYCLE_WRSR;
                break;
        case SPINOR_OP_RDSR:
                val |= HSFSTS_CTL_FCYCLE_RDSR;
                break;
        default:
                return -EINVAL;
        }

        if (len > INTEL_SPI_FIFO_SZ)
                return -EINVAL;

        val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
        val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
        val |= HSFSTS_CTL_FGO;
        writel(val, ispi->base + HSFSTS_CTL);

        ret = intel_spi_wait_hw_busy(ispi);
        if (ret)
                return ret;

        status = readl(ispi->base + HSFSTS_CTL);
        if (status & HSFSTS_CTL_FCERR)
                return -EIO;
        else if (status & HSFSTS_CTL_AEL)
                return -EACCES;

        return 0;
}

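/*
 * Runs a single cycle on the SW sequencer: looks up the opcode in OPMENU,
 * optionally prepends the pending atomic preopcode (e.g. WREN), starts the
 * cycle and checks the resulting status for errors.
 */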
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
                              int optype)
{
        u32 val = 0, status;
        u8 atomic_preopcode;
        int ret;

        ret = intel_spi_opcode_index(ispi, opcode, optype);
        if (ret < 0)
                return ret;

        if (len > INTEL_SPI_FIFO_SZ)
                return -EINVAL;

        /*
         * Always clear it after each SW sequencer operation regardless
         * of whether it is successful or not.
         */
        atomic_preopcode = ispi->atomic_preopcode;
        ispi->atomic_preopcode = 0;

        /* Only mark 'Data Cycle' bit when there is data to be transferred */
        if (len > 0)
                val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
        val |= ret << SSFSTS_CTL_COP_SHIFT;
        val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
        val |= SSFSTS_CTL_SCGO;
        if (atomic_preopcode) {
                u16 preop;

                switch (optype) {
                case OPTYPE_WRITE_NO_ADDR:
                case OPTYPE_WRITE_WITH_ADDR:
                        /* Pick matching preopcode for the atomic sequence */
                        preop = readw(ispi->sregs + PREOP_OPTYPE);
                        if ((preop & 0xff) == atomic_preopcode)
                                ; /* Do nothing */
                        else if ((preop >> 8) == atomic_preopcode)
                                val |= SSFSTS_CTL_SPOP;
                        else
                                return -EINVAL;

                        /* Enable atomic sequence */
                        val |= SSFSTS_CTL_ACS;
                        break;

                default:
                        return -EINVAL;
                }
        }
        writel(val, ispi->sregs + SSFSTS_CTL);

        ret = intel_spi_wait_sw_busy(ispi);
        if (ret)
                return ret;

        status = readl(ispi->sregs + SSFSTS_CTL);
        if (status & SSFSTS_CTL_FCERR)
                return -EIO;
        else if (status & SSFSTS_CTL_AEL)
                return -EACCES;

        return 0;
}

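/* Reads a register-style opcode (no address) and copies the data from FDATA */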
static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
        struct intel_spi *ispi = nor->priv;
        int ret;

        /* Address of the first chip */
        writel(0, ispi->base + FADDR);

        if (ispi->swseq_reg)
                ret = intel_spi_sw_cycle(ispi, opcode, len,
                                         OPTYPE_READ_NO_ADDR);
        else
                ret = intel_spi_hw_cycle(ispi, opcode, len);

        if (ret)
                return ret;

        return intel_spi_read_block(ispi, buf, len);
}

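/*
 * Writes a register-style opcode. WREN is not sent as a cycle of its own;
 * it is recorded as the preopcode for the atomic sequence started by the
 * following SW cycle.
 */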
static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
        struct intel_spi *ispi = nor->priv;
        int ret;

        /*
         * Write enable is handled with an atomic operation and preopcode in
         * the Intel controller, so we only verify that the opcode is
         * available. If the controller is not locked, program the opcode to
         * the PREOP register for later use.
         *
         * When the hardware sequencer is used there is no need to program
         * any opcodes (it handles them automatically as part of a command).
         */
        if (opcode == SPINOR_OP_WREN) {
                u16 preop;

                if (!ispi->swseq_reg)
                        return 0;

                preop = readw(ispi->sregs + PREOP_OPTYPE);
                if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
                        if (ispi->locked)
                                return -EINVAL;
                        writel(opcode, ispi->sregs + PREOP_OPTYPE);
                }

                /*
                 * This enables the atomic sequence on the next SW cycle. It
                 * will be cleared after the next operation.
                 */
                ispi->atomic_preopcode = opcode;
                return 0;
        }

        writel(0, ispi->base + FADDR);

        /* Write the value beforehand */
        ret = intel_spi_write_block(ispi, buf, len);
        if (ret)
                return ret;

        if (ispi->swseq_reg)
                return intel_spi_sw_cycle(ispi, opcode, len,
                                          OPTYPE_WRITE_NO_ADDR);
        return intel_spi_hw_cycle(ispi, opcode, len);
}

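/*
 * HW sequencer read. The transfer is split into chunks of at most
 * INTEL_SPI_FIFO_SZ bytes that do not cross a 4K boundary.
 */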
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
                              u_char *read_buf)
{
        struct intel_spi *ispi = nor->priv;
        size_t block_size, retlen = 0;
        u32 val, status;
        ssize_t ret;

        /*
         * Atomic sequence is not expected with HW sequencer reads. Make
         * sure it is cleared regardless.
         */
        if (WARN_ON_ONCE(ispi->atomic_preopcode))
                ispi->atomic_preopcode = 0;

        switch (nor->read_opcode) {
        case SPINOR_OP_READ:
        case SPINOR_OP_READ_FAST:
        case SPINOR_OP_READ_4B:
        case SPINOR_OP_READ_FAST_4B:
                break;
        default:
                return -EINVAL;
        }

        while (len > 0) {
                block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

                /* Read cannot cross 4K boundary */
                block_size = min_t(loff_t, from + block_size,
                                   round_up(from + 1, SZ_4K)) - from;

                writel(from, ispi->base + FADDR);

                val = readl(ispi->base + HSFSTS_CTL);
                val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
                val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
                val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
                val |= HSFSTS_CTL_FCYCLE_READ;
                val |= HSFSTS_CTL_FGO;
                writel(val, ispi->base + HSFSTS_CTL);

                ret = intel_spi_wait_hw_busy(ispi);
                if (ret)
                        return ret;

                status = readl(ispi->base + HSFSTS_CTL);
                if (status & HSFSTS_CTL_FCERR)
                        ret = -EIO;
                else if (status & HSFSTS_CTL_AEL)
                        ret = -EACCES;

                if (ret < 0) {
                        dev_err(ispi->dev, "read error: %llx: %#x\n", from,
                                status);
                        return ret;
                }

                ret = intel_spi_read_block(ispi, read_buf, block_size);
                if (ret)
                        return ret;

                len -= block_size;
                from += block_size;
                retlen += block_size;
                read_buf += block_size;
        }

        return retlen;
}

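/*
 * HW sequencer write. Like the read path, the transfer is split into chunks
 * of at most INTEL_SPI_FIFO_SZ bytes that do not cross a 4K boundary; data
 * is loaded into the FIFO before the cycle is started.
 */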
static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
                               const u_char *write_buf)
{
        struct intel_spi *ispi = nor->priv;
        size_t block_size, retlen = 0;
        u32 val, status;
        ssize_t ret;

        /* Not needed with HW sequencer write, make sure it is cleared */
        ispi->atomic_preopcode = 0;

        while (len > 0) {
                block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);

                /* Write cannot cross 4K boundary */
                block_size = min_t(loff_t, to + block_size,
                                   round_up(to + 1, SZ_4K)) - to;

                writel(to, ispi->base + FADDR);

                val = readl(ispi->base + HSFSTS_CTL);
                val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
                val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
                val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
                val |= HSFSTS_CTL_FCYCLE_WRITE;

                ret = intel_spi_write_block(ispi, write_buf, block_size);
                if (ret) {
                        dev_err(ispi->dev, "failed to write block\n");
                        return ret;
                }

                /* Start the write now */
                val |= HSFSTS_CTL_FGO;
                writel(val, ispi->base + HSFSTS_CTL);

                ret = intel_spi_wait_hw_busy(ispi);
                if (ret) {
                        dev_err(ispi->dev, "timeout\n");
                        return ret;
                }

                status = readl(ispi->base + HSFSTS_CTL);
                if (status & HSFSTS_CTL_FCERR)
                        ret = -EIO;
                else if (status & HSFSTS_CTL_AEL)
                        ret = -EACCES;

                if (ret < 0) {
                        dev_err(ispi->dev, "write error: %llx: %#x\n", to,
                                status);
                        return ret;
                }

                len -= block_size;
                to += block_size;
                retlen += block_size;
                write_buf += block_size;
        }

        return retlen;
}

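/*
 * Erases nor->mtd.erasesize bytes starting at @offs using either the SW or
 * the HW sequencer. With the HW sequencer, 64K erase cycles are used when
 * the controller supports them, otherwise 4K cycles.
 */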
static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
{
        size_t erase_size, len = nor->mtd.erasesize;
        struct intel_spi *ispi = nor->priv;
        u32 val, status, cmd;
        int ret;

        /* If the hardware can do 64k erase use that when possible */
        if (len >= SZ_64K && ispi->erase_64k) {
                cmd = HSFSTS_CTL_FCYCLE_ERASE_64K;
                erase_size = SZ_64K;
        } else {
                cmd = HSFSTS_CTL_FCYCLE_ERASE;
                erase_size = SZ_4K;
        }

        if (ispi->swseq_erase) {
                while (len > 0) {
                        writel(offs, ispi->base + FADDR);

                        ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
                                                 0, OPTYPE_WRITE_WITH_ADDR);
                        if (ret)
                                return ret;

                        offs += erase_size;
                        len -= erase_size;
                }

                return 0;
        }

        /* Not needed with HW sequencer erase, make sure it is cleared */
        ispi->atomic_preopcode = 0;

        while (len > 0) {
                writel(offs, ispi->base + FADDR);

                val = readl(ispi->base + HSFSTS_CTL);
                val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
                val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
                val |= cmd;
                val |= HSFSTS_CTL_FGO;
                writel(val, ispi->base + HSFSTS_CTL);

                ret = intel_spi_wait_hw_busy(ispi);
                if (ret)
                        return ret;

                status = readl(ispi->base + HSFSTS_CTL);
                if (status & HSFSTS_CTL_FCERR)
                        return -EIO;
                else if (status & HSFSTS_CTL_AEL)
                        return -EACCES;

                offs += erase_size;
                len -= erase_size;
        }

        return 0;
}

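/*
 * Returns true if any enabled protected range lies entirely within the
 * given [base, limit] flash region (all values in 4K units).
 */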
static bool intel_spi_is_protected(const struct intel_spi *ispi,
                                   unsigned int base, unsigned int limit)
{
        int i;

        for (i = 0; i < ispi->pr_num; i++) {
                u32 pr_base, pr_limit, pr_value;

                pr_value = readl(ispi->pregs + PR(i));
                if (!(pr_value & (PR_WPE | PR_RPE)))
                        continue;

                pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
                pr_base = pr_value & PR_BASE_MASK;

                if (pr_base >= base && pr_limit <= limit)
                        return true;
        }

        return false;
}

/*
 * There will be a single partition holding all enabled flash regions. We
 * call this "BIOS".
 */
static void intel_spi_fill_partition(struct intel_spi *ispi,
                                     struct mtd_partition *part)
{
        u64 end;
        int i;

        memset(part, 0, sizeof(*part));

        /* Start from the mandatory descriptor region */
        part->size = 4096;
        part->name = "BIOS";

        /*
         * Now try to find where this partition ends based on the flash
         * region registers.
         */
        for (i = 1; i < ispi->nregions; i++) {
                u32 region, base, limit;

                region = readl(ispi->base + FREG(i));
                base = region & FREG_BASE_MASK;
                limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

                if (base >= limit || limit == 0)
                        continue;

                /*
                 * If any of the regions have protection bits set, make the
                 * whole partition read-only to be on the safe side.
                 */
                if (intel_spi_is_protected(ispi, base, limit))
                        ispi->writeable = false;

                end = (limit << 12) + 4096;
                if (end > part->size)
                        part->size = end;
        }
}

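/**
 * intel_spi_probe() - Initialize the controller and register the MTD device
 * @dev: Parent device
 * @mem: MMIO resource of the controller
 * @info: Board specific information (controller type, write protection)
 *
 * Maps the controller registers, initializes the controller, scans for the
 * attached SPI NOR chip and registers a single "BIOS" MTD partition covering
 * all enabled flash regions. Returns the driver private data on success or
 * an ERR_PTR() on failure.
 */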
struct intel_spi *intel_spi_probe(struct device *dev,
        struct resource *mem, const struct intel_spi_boardinfo *info)
{
        const struct spi_nor_hwcaps hwcaps = {
                .mask = SNOR_HWCAPS_READ |
                        SNOR_HWCAPS_READ_FAST |
                        SNOR_HWCAPS_PP,
        };
        struct mtd_partition part;
        struct intel_spi *ispi;
        int ret;

        if (!info || !mem)
                return ERR_PTR(-EINVAL);

        ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
        if (!ispi)
                return ERR_PTR(-ENOMEM);

        ispi->base = devm_ioremap_resource(dev, mem);
        if (IS_ERR(ispi->base))
                return ERR_CAST(ispi->base);

        ispi->dev = dev;
        ispi->info = info;
        ispi->writeable = info->writeable;

        ret = intel_spi_init(ispi);
        if (ret)
                return ERR_PTR(ret);

        ispi->nor.dev = ispi->dev;
        ispi->nor.priv = ispi;
        ispi->nor.read_reg = intel_spi_read_reg;
        ispi->nor.write_reg = intel_spi_write_reg;
        ispi->nor.read = intel_spi_read;
        ispi->nor.write = intel_spi_write;
        ispi->nor.erase = intel_spi_erase;

        ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
        if (ret) {
                dev_info(dev, "failed to locate the chip\n");
                return ERR_PTR(ret);
        }

        intel_spi_fill_partition(ispi, &part);

        /* Prevent writes if not explicitly enabled */
        if (!ispi->writeable || !writeable)
                ispi->nor.mtd.flags &= ~MTD_WRITEABLE;

        ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
        if (ret)
                return ERR_PTR(ret);

        return ispi;
}
EXPORT_SYMBOL_GPL(intel_spi_probe);

int intel_spi_remove(struct intel_spi *ispi)
{
        return mtd_device_unregister(&ispi->nor.mtd);
}
EXPORT_SYMBOL_GPL(intel_spi_remove);

MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");