linux/drivers/mtd/nand/raw/nand_hynix.c
/*
 * Copyright (C) 2017 Free Electrons
 * Copyright (C) 2017 NextThing Co
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#define NAND_HYNIX_CMD_SET_PARAMS	0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS	0x16

#define NAND_HYNIX_1XNM_RR_REPEAT	8

/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to set when applying a new read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: array of values to set in registers. The array size is equal to
 *          (nregs * nmodes)
 */
struct hynix_read_retry {
	int nregs;
	const u8 *regs;
	u8 values[];
};

/**
 * struct hynix_nand - private Hynix NAND struct
 * @read_retry: read-retry information
 */
struct hynix_nand {
	const struct hynix_read_retry *read_retry;
};

/**
 * struct hynix_read_retry_otp - description of the read-retry OTP area
 * @nregs: number of Hynix private registers to set before reading the OTP
 *         area
 * @regs: registers that should be configured
 * @values: values that should be set in regs
 * @page: the address to pass to the READ_PAGE command. Depends on the NAND
 *        chip
 * @size: size of the read-retry OTP section
 */
struct hynix_read_retry_otp {
	int nregs;
	const u8 *regs;
	const u8 *values;
	int page;
	int size;
};

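/*
 * NANDs that answer the READID command at address 0x40 with the ASCII string
 * "JEDEC" follow the newer Hynix ID scheme (see the valid_jedecid checks
 * below). This only tells the two schemes apart; it does not imply a full
 * JEDEC parameter page is present.
 */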
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
	u8 jedecid[5] = { };
	int ret;

	ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
	if (ret)
		return false;

	return !strncmp("JEDEC", jedecid, sizeof(jedecid));
}

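/*
 * Send a single command cycle to the NAND. The ->exec_op() path is used when
 * the controller provides it, otherwise we fall back to the legacy
 * ->cmdfunc() interface.
 */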
static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->exec_op) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(cmd, 0),
		};
		struct nand_operation op = NAND_OPERATION(instrs);

		return nand_exec_op(chip, &op);
	}

	chip->cmdfunc(mtd, cmd, -1, -1);

	return 0;
}

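/*
 * Write @val to the Hynix-private register at offset @addr. The register
 * offset is sent as an address cycle (duplicated in both column bytes for
 * the legacy ->cmdfunc() path), followed by the data byte.
 */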
static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u16 column = ((u16)addr << 8) | addr;

	chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
	chip->write_byte(mtd, val);

	return 0;
}

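/*
 * ->setup_read_retry() hook: select read-retry mode @retry_mode by writing
 * the pre-computed register values for that mode through the
 * 'Set Hynix Parameters'/'Apply Hynix Parameters' command sequence.
 */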
static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	const u8 *values;
	int i, ret;

	values = hynix->read_retry->values +
		 (retry_mode * hynix->read_retry->nregs);

	/* Enter 'Set Hynix Parameters' mode */
	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	/*
	 * Configure the NAND in the requested read-retry mode.
	 * This is done by setting pre-defined values in internal NAND
	 * registers.
	 *
	 * The set of registers is NAND specific, and the values are either
	 * predefined or extracted from an OTP area on the NAND (values are
	 * probably tweaked at production in this case).
	 */
	for (i = 0; i < hynix->read_retry->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
					      values[i]);
		if (ret)
			return ret;
	}

	/* Apply the new settings. */
	return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}

/**
 * hynix_get_majority - get the value occurring most often in a set of values
 * @in: the array of values to test
 * @repeat: the size of the in array
 * @out: pointer used to store the output value
 *
 * This function implements the 'majority check' logic that is supposed to
 * overcome the unreliability of MLC NANDs when reading the OTP area storing
 * the read-retry parameters.
 *
 * It's based on a pretty simple assumption: if we read the same value
 * several times and then keep the one occurring most often, we should end
 * up with the correct value.
 * Let's hope this simple algorithm prevents us from losing the read-retry
 * parameters.
 */
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
	int i, j, half = repeat / 2;

	/*
	 * We only test the first half of the in array because we must ensure
	 * that the value is present at least repeat / 2 times.
	 *
	 * This loop is suboptimal since we may count the occurrences of the
	 * same value several times, but we are doing that on small sets,
	 * which makes it acceptable.
	 */
	for (i = 0; i < half; i++) {
		int cnt = 0;
		u8 val = in[i];

		/* Count all values that are matching the one at index i. */
		for (j = i + 1; j < repeat; j++) {
			if (in[j] == val)
				cnt++;
		}

		/* We found a value occurring more than repeat / 2 times. */
		if (cnt > half) {
			*out = val;
			return 0;
		}
	}

	return -EIO;
}

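/*
 * Read the read-retry OTP area described by @info into @buf: reset the chip,
 * program the registers listed in @info, issue the 0x17/0x04/0x19 command
 * sequence (which appears to switch the die to OTP access), read @info->size
 * bytes from page @info->page, and finally reset and clear register 0x38 to
 * go back to normal array access.
 */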
static int hynix_read_rr_otp(struct nand_chip *chip,
			     const struct hynix_read_retry_otp *info,
			     void *buf)
{
	int i, ret;

	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	for (i = 0; i < info->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, info->regs[i],
					      info->values[i]);
		if (ret)
			return ret;
	}

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	/* Sequence to enter OTP mode? */
	ret = hynix_nand_cmd_op(chip, 0x17);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x4);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x19);
	if (ret)
		return ret;

	/* Now read the page */
	ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
	if (ret)
		return ret;

	/* Put everything back to normal */
	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	ret = hynix_nand_reg_write_op(chip, 0x38, 0);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	return nand_read_page_op(chip, 0, 0, NULL, 0);
}

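/*
 * Layout of the read-retry OTP area, as decoded below:
 * - bytes 0-7: number of read-retry modes, repeated 8 times
 * - bytes 8-15: number of registers per mode, repeated 8 times
 * - from byte 16: NAND_HYNIX_1XNM_RR_REPEAT copies of the parameter sets,
 *   each copy made of a plain set immediately followed by a bit-inverted
 *   set, a set being (nmodes * nregs) bytes
 */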
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS				0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS			8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv)		\
	(16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))

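/*
 * Extract the value of register @reg for read-retry mode @mode from the OTP
 * dump in @buf: gather the NAND_HYNIX_1XNM_RR_REPEAT copies (plain or
 * inverted, depending on @inv), run a majority check on them and, for the
 * inverted sets, flip the bits back before returning the value in @val.
 */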
static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
				   int mode, int reg, bool inv, u8 *val)
{
	u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
	int val_offs = (mode * nregs) + reg;
	int set_size = nmodes * nregs;
	int i, ret;

	for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
		int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

		tmp[i] = buf[val_offs + set_offs];
	}

	ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
	if (ret)
		return ret;

	if (inv)
		*val = ~*val;

	return 0;
}

static u8 hynix_1xnm_mlc_read_retry_regs[] = {
	0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};

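/*
 * Read the OTP area described by @info, recover the number of modes and the
 * per-mode register values through majority checks (trying the plain sets
 * first, then the inverted ones), and register the ->setup_read_retry() hook
 * if everything could be decoded.
 */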
static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
				  const struct hynix_read_retry_otp *info)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	struct hynix_read_retry *rr = NULL;
	int ret, i, j;
	u8 nregs, nmodes;
	u8 *buf;

	buf = kmalloc(info->size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hynix_read_rr_otp(chip, info, buf);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
				 &nmodes);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
				 NAND_HYNIX_1XNM_RR_REPEAT,
				 &nregs);
	if (ret)
		goto out;

	rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
	if (!rr) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nmodes; i++) {
		for (j = 0; j < nregs; j++) {
			u8 *val = rr->values + (i * nregs) + j;

			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      false, val);
			if (!ret)
				continue;

			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      true, val);
			if (ret)
				goto out;
		}
	}

	rr->nregs = nregs;
	rr->regs = hynix_1xnm_mlc_read_retry_regs;
	hynix->read_retry = rr;
	chip->setup_read_retry = hynix_nand_setup_read_retry;
	chip->read_retries = nmodes;

out:
	kfree(buf);

	if (ret)
		kfree(rr);

	return ret;
}

static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x21f,
		.size = 784,
	},
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x200,
		.size = 528,
	},
};

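/*
 * Probe the read-retry OTP layouts listed above, stopping at the first one
 * that decodes. Read-retry is only wired up for 1x nm MLC parts (NAND
 * technology field == 4); a decoding failure is reported with a warning but
 * is not fatal, since the chip remains usable without read-retry.
 */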
static int hynix_nand_rr_init(struct nand_chip *chip)
{
	int i, ret = 0;
	bool valid_jedecid;

	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	/*
	 * We only support read-retry for 1xnm NANDs, and those NANDs all
	 * expose a valid JEDEC ID.
	 */
	if (valid_jedecid) {
		u8 nand_tech = chip->id.data[5] >> 4;

		/* 1xnm technology */
		if (nand_tech == 4) {
			for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
			     i++) {
				/*
				 * FIXME: Hynix recommends copying the
				 * read-retry OTP area into a normal page.
				 */
				ret = hynix_mlc_1xnm_rr_init(chip,
						&hynix_mlc_1xnm_rr_otps[i]);
				if (!ret)
					break;
			}
		}
	}

	if (ret)
		pr_warn("failed to initialize read-retry infrastructure\n");

	return 0;
}

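/*
 * The OOB size selector is spread over ID byte 3: bits 2-3 form the low two
 * bits and bit 6 the third bit. The resulting code is interpreted with one
 * table for chips using the newer (valid JEDEC ID) scheme and another for
 * the older one.
 */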
static void hynix_nand_extract_oobsize(struct nand_chip *chip,
				       bool valid_jedecid)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 oobsize;

	oobsize = ((chip->id.data[3] >> 2) & 0x3) |
		  ((chip->id.data[3] >> 4) & 0x4);

	if (valid_jedecid) {
		switch (oobsize) {
		case 0:
			mtd->oobsize = 2048;
			break;
		case 1:
			mtd->oobsize = 1664;
			break;
		case 2:
			mtd->oobsize = 1024;
			break;
		case 3:
			mtd->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}
	} else {
		switch (oobsize) {
		case 0:
			mtd->oobsize = 128;
			break;
		case 1:
			mtd->oobsize = 224;
			break;
		case 2:
			mtd->oobsize = 448;
			break;
		case 3:
			mtd->oobsize = 64;
			break;
		case 4:
			mtd->oobsize = 32;
			break;
		case 5:
			mtd->oobsize = 16;
			break;
		case 6:
			mtd->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}
	}
}

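/*
 * ECC requirements are encoded in bits 4-6 of ID byte 4. The decoded values
 * land in ecc_strength_ds/ecc_step_ds, i.e. the minimum ECC strength per ECC
 * step that the datasheet requires; the mapping depends on whether the chip
 * uses the newer ID scheme and, for the older one, on the process node
 * encoded in ID byte 5.
 */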
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
						bool valid_jedecid)
{
	u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

	if (valid_jedecid) {
		/* Reference: H27UCG8T2E datasheet */
		chip->ecc_step_ds = 1024;

		switch (ecc_level) {
		case 0:
			chip->ecc_step_ds = 0;
			chip->ecc_strength_ds = 0;
			break;
		case 1:
			chip->ecc_strength_ds = 4;
			break;
		case 2:
			chip->ecc_strength_ds = 24;
			break;
		case 3:
			chip->ecc_strength_ds = 32;
			break;
		case 4:
			chip->ecc_strength_ds = 40;
			break;
		case 5:
			chip->ecc_strength_ds = 50;
			break;
		case 6:
			chip->ecc_strength_ds = 60;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid ECC requirements");
		}
	} else {
		/*
		 * The ECC requirements field meaning depends on the
		 * NAND technology.
		 */
		u8 nand_tech = chip->id.data[5] & 0x7;

		if (nand_tech < 3) {
			/* > 26nm, reference: H27UBG8T2A datasheet */
			if (ecc_level < 5) {
				chip->ecc_step_ds = 512;
				chip->ecc_strength_ds = 1 << ecc_level;
			} else if (ecc_level < 7) {
				if (ecc_level == 5)
					chip->ecc_step_ds = 2048;
				else
					chip->ecc_step_ds = 1024;
				chip->ecc_strength_ds = 24;
			} else {
				/*
				 * We should never reach this case, but if that
				 * happens, this probably means Hynix decided
				 * to use a different extended ID format, and
				 * we should find a way to support it.
				 */
				WARN(1, "Invalid ECC requirements");
			}
		} else {
			/* <= 26nm, reference: H27UBG8T2B datasheet */
			if (!ecc_level) {
				chip->ecc_step_ds = 0;
				chip->ecc_strength_ds = 0;
			} else if (ecc_level < 5) {
				chip->ecc_step_ds = 512;
				chip->ecc_strength_ds = 1 << (ecc_level - 1);
			} else {
				chip->ecc_step_ds = 1024;
				chip->ecc_strength_ds = 24 +
							(8 * (ecc_level - 5));
			}
		}
	}
}

static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
						       bool valid_jedecid)
{
	u8 nand_tech;

	/* We need scrambling on all TLC NANDs */
	if (chip->bits_per_cell > 2)
		chip->options |= NAND_NEED_SCRAMBLING;

	/* And on MLC NANDs with sub-3xnm process */
	if (valid_jedecid) {
		nand_tech = chip->id.data[5] >> 4;

		/* < 3xnm */
		if (nand_tech > 0)
			chip->options |= NAND_NEED_SCRAMBLING;
	} else {
		nand_tech = chip->id.data[5] & 0x7;

		/* < 32nm */
		if (nand_tech > 2)
			chip->options |= NAND_NEED_SCRAMBLING;
	}
}

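/*
 * ->detect() hook: decode the Hynix extended ID bytes. ID byte 3 carries the
 * page size, OOB size and block size selectors, byte 4 the ECC requirements
 * and byte 5 the NAND technology, interpreted differently depending on
 * whether the chip exposes the "JEDEC" marker.
 */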
static void hynix_nand_decode_id(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool valid_jedecid;
	u8 tmp;

	/*
	 * Exclude all SLC NANDs from this advanced detection scheme.
	 * According to the ranges defined in several datasheets, it might
	 * appear that even SLC NANDs could fall in this extended ID scheme.
	 * If that is the case, rework the test to let SLC NANDs go through
	 * the detection process.
	 */
	if (chip->id.len < 6 || nand_is_slc(chip)) {
		nand_decode_ext_id(chip);
		return;
	}

	/* Extract pagesize */
	mtd->writesize = 2048 << (chip->id.data[3] & 0x03);

	tmp = (chip->id.data[3] >> 4) & 0x3;
	/*
	 * When bit7 is set that means we start counting at 1MiB, otherwise
	 * we start counting at 128KiB and shift this value by the content of
	 * ID[3][4:5].
	 * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
	 * this case the erasesize is set to 768KiB.
	 */
	if (chip->id.data[3] & 0x80)
		mtd->erasesize = SZ_1M << tmp;
	else if (tmp == 3)
		mtd->erasesize = SZ_512K + SZ_256K;
	else
		mtd->erasesize = SZ_128K << tmp;

	/*
	 * Modern Toggle DDR NANDs have a valid JEDECID even though they are
	 * not exposing a valid JEDEC parameter table.
	 * These NANDs use a different NAND ID scheme.
	 */
	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	hynix_nand_extract_oobsize(chip, valid_jedecid);
	hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
	hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}

static void hynix_nand_cleanup(struct nand_chip *chip)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);

	if (!hynix)
		return;

	kfree(hynix->read_retry);
	kfree(hynix);
	nand_set_manufacturer_data(chip, NULL);
}

static int hynix_nand_init(struct nand_chip *chip)
{
	struct hynix_nand *hynix;
	int ret;

	if (!nand_is_slc(chip))
		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
	else
		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;

	hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
	if (!hynix)
		return -ENOMEM;

	nand_set_manufacturer_data(chip, hynix);

	ret = hynix_nand_rr_init(chip);
	if (ret)
		hynix_nand_cleanup(chip);

	return ret;
}

const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
	.detect = hynix_nand_decode_id,
	.init = hynix_nand_init,
	.cleanup = hynix_nand_cleanup,
};