linux/drivers/mtd/nand/nand_hynix.c
/*
 * Copyright (C) 2017 Free Electrons
 * Copyright (C) 2017 NextThing Co
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/mtd/nand.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#define NAND_HYNIX_CMD_SET_PARAMS       0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS     0x16

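/*
 * Each value stored in the read-retry OTP area is repeated
 * NAND_HYNIX_1XNM_RR_REPEAT times so that bitflips can be filtered out
 * with a majority vote (see hynix_get_majority()).
 */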
#define NAND_HYNIX_1XNM_RR_REPEAT       8

/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to set when applying a new read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: array of values to set in registers. The array size is equal to
 *          (nregs * nmodes)
 */
struct hynix_read_retry {
        int nregs;
        const u8 *regs;
        u8 values[0];
};

/**
 * struct hynix_nand - private Hynix NAND struct
 * @read_retry: read-retry information
 */
struct hynix_nand {
        const struct hynix_read_retry *read_retry;
};

/**
 * struct hynix_read_retry_otp - structure describing how the read-retry OTP
 *                               area is organized
 * @nregs: number of Hynix private registers to set before reading the OTP
 *         area
 * @regs: registers that should be configured
 * @values: values that should be set in regs
 * @page: the address to pass to the READ_PAGE command. Depends on the NAND
 *        chip
 * @size: size of the read-retry OTP section
 */
struct hynix_read_retry_otp {
        int nregs;
        const u8 *regs;
        const u8 *values;
        int page;
        int size;
};

static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        u8 jedecid[6] = { };
        int i = 0;

        chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
        for (i = 0; i < 5; i++)
                jedecid[i] = chip->read_byte(mtd);

        return !strcmp("JEDEC", jedecid);
}

static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
        const u8 *values;
        int status;
        int i;

        values = hynix->read_retry->values +
                 (retry_mode * hynix->read_retry->nregs);

        /* Enter 'Set Hynix Parameters' mode */
        chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);

        /*
         * Configure the NAND in the requested read-retry mode.
         * This is done by setting pre-defined values in internal NAND
         * registers.
         *
         * The set of registers is NAND specific, and the values are either
         * predefined or extracted from an OTP area on the NAND (values are
         * probably tweaked at production in this case).
         */
        for (i = 0; i < hynix->read_retry->nregs; i++) {
                int column = hynix->read_retry->regs[i];

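                /*
                 * The register offset is duplicated in the high byte so
                 * that both column address cycles issued by ->cmdfunc()
                 * carry the same register address (this assumes the default
                 * large-page ->cmdfunc() implementation).
                 */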
                column |= column << 8;
                chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
                chip->write_byte(mtd, values[i]);
        }

        /* Apply the new settings. */
        chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);

        status = chip->waitfunc(mtd, chip);
        if (status & NAND_STATUS_FAIL)
                return -EIO;

        return 0;
}

/**
 * hynix_get_majority - get the value that is occurring the most in a given
 *                      set of values
 * @in: the array of values to test
 * @repeat: the size of the in array
 * @out: pointer used to store the output value
 *
 * This function implements the 'majority check' logic that is supposed to
 * overcome the unreliability of MLC NANDs when reading the OTP area storing
 * the read-retry parameters.
 *
 * It's based on a pretty simple assumption: if we repeat the same value
 * several times and then take the one that is occurring the most, we should
 * find the correct value.
 * Let's hope this dummy algorithm prevents us from losing the read-retry
 * parameters.
 */
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
        int i, j, half = repeat / 2;

        /*
         * We only test the first half of the in array because we must ensure
         * that the value is at least occurring repeat / 2 times.
         *
         * This loop is suboptimal since we may count the occurrences of the
         * same value several times, but we are doing that on small sets,
         * which makes it acceptable.
         */
        for (i = 0; i < half; i++) {
                int cnt = 0;
                u8 val = in[i];

                /* Count all values that are matching the one at index i. */
                for (j = i + 1; j < repeat; j++) {
                        if (in[j] == val)
                                cnt++;
                }

                /* We found a value occurring more than repeat / 2. */
                if (cnt > half) {
                        *out = val;
                        return 0;
                }
        }

        return -EIO;
}

static int hynix_read_rr_otp(struct nand_chip *chip,
                             const struct hynix_read_retry_otp *info,
                             void *buf)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int i;

        chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);

        chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);

        for (i = 0; i < info->nregs; i++) {
                int column = info->regs[i];

                column |= column << 8;
                chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
                chip->write_byte(mtd, info->values[i]);
        }

        chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);

        /* Sequence to enter OTP mode? */
        chip->cmdfunc(mtd, 0x17, -1, -1);
        chip->cmdfunc(mtd, 0x04, -1, -1);
        chip->cmdfunc(mtd, 0x19, -1, -1);

        /* Now read the page */
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page);
        chip->read_buf(mtd, buf, info->size);

        /* Put everything back to normal */
        chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
        chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1);
        chip->write_byte(mtd, 0x0);
        chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1);

        return 0;
}

#define NAND_HYNIX_1XNM_RR_COUNT_OFFS                           0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS                       8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv)            \
        (16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))

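/*
 * Layout of the read-retry OTP area as interpreted by the code below: the
 * number of modes and the number of registers are each stored
 * NAND_HYNIX_1XNM_RR_REPEAT times at offsets 0 and 8, and the value sets
 * (nmodes * nregs bytes each, indexed by (mode * nregs) + reg) start at
 * offset 16, repeated NAND_HYNIX_1XNM_RR_REPEAT times as a plain copy
 * followed by a bit-inverted copy.
 */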
static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
                                   int mode, int reg, bool inv, u8 *val)
{
        u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
        int val_offs = (mode * nregs) + reg;
        int set_size = nmodes * nregs;
        int i, ret;

        for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
                int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

                tmp[i] = buf[val_offs + set_offs];
        }

        ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
        if (ret)
                return ret;

        if (inv)
                *val = ~*val;

        return 0;
}

static u8 hynix_1xnm_mlc_read_retry_regs[] = {
        0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};

static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
                                  const struct hynix_read_retry_otp *info)
{
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
        struct hynix_read_retry *rr = NULL;
        int ret, i, j;
        u8 nregs, nmodes;
        u8 *buf;

        buf = kmalloc(info->size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = hynix_read_rr_otp(chip, info, buf);
        if (ret)
                goto out;

        ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
                                 &nmodes);
        if (ret)
                goto out;

        ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
                                 NAND_HYNIX_1XNM_RR_REPEAT,
                                 &nregs);
        if (ret)
                goto out;

        rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
        if (!rr) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < nmodes; i++) {
                for (j = 0; j < nregs; j++) {
                        u8 *val = rr->values + (i * nregs) + j;

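                        /*
                         * Try the plain copies first and fall back to the
                         * bit-inverted copies if no majority value is
                         * found.
                         */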
                        ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
                                                      false, val);
                        if (!ret)
                                continue;

                        ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
                                                      true, val);
                        if (ret)
                                goto out;
                }
        }

        rr->nregs = nregs;
        rr->regs = hynix_1xnm_mlc_read_retry_regs;
        hynix->read_retry = rr;
        chip->setup_read_retry = hynix_nand_setup_read_retry;
        chip->read_retries = nmodes;

out:
        kfree(buf);

        if (ret)
                kfree(rr);

        return ret;
}

static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
        {
                .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
                .regs = hynix_mlc_1xnm_rr_otp_regs,
                .values = hynix_mlc_1xnm_rr_otp_values,
                .page = 0x21f,
                .size = 784
        },
        {
                .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
                .regs = hynix_mlc_1xnm_rr_otp_regs,
                .values = hynix_mlc_1xnm_rr_otp_values,
                .page = 0x200,
                .size = 528,
        },
};

static int hynix_nand_rr_init(struct nand_chip *chip)
{
        int i, ret = 0;
        bool valid_jedecid;

        valid_jedecid = hynix_nand_has_valid_jedecid(chip);

        /*
         * We only support read-retry for 1xnm NANDs, and those NANDs all
         * expose a valid JEDEC ID.
         */
        if (valid_jedecid) {
                u8 nand_tech = chip->id.data[5] >> 4;

                /* 1xnm technology */
                if (nand_tech == 4) {
                        for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
                             i++) {
                                /*
                                 * FIXME: Hynix recommend to copy the
                                 * read-retry OTP area into a normal page.
                                 */
                                ret = hynix_mlc_1xnm_rr_init(chip,
                                                hynix_mlc_1xnm_rr_otps + i);
                                if (!ret)
                                        break;
                        }
                }
        }

        if (ret)
                pr_warn("failed to initialize read-retry infrastructure\n");

        return 0;
}

static void hynix_nand_extract_oobsize(struct nand_chip *chip,
                                       bool valid_jedecid)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        u8 oobsize;

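        /*
         * The OOB size code is spread over ID byte 3: bits 3:2 provide the
         * two low bits and bit 6 provides the third bit.
         */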
        oobsize = ((chip->id.data[3] >> 2) & 0x3) |
                  ((chip->id.data[3] >> 4) & 0x4);

        if (valid_jedecid) {
                switch (oobsize) {
                case 0:
                        mtd->oobsize = 2048;
                        break;
                case 1:
                        mtd->oobsize = 1664;
                        break;
                case 2:
                        mtd->oobsize = 1024;
                        break;
                case 3:
                        mtd->oobsize = 640;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid OOB size");
                        break;
                }
        } else {
                switch (oobsize) {
                case 0:
                        mtd->oobsize = 128;
                        break;
                case 1:
                        mtd->oobsize = 224;
                        break;
                case 2:
                        mtd->oobsize = 448;
                        break;
                case 3:
                        mtd->oobsize = 64;
                        break;
                case 4:
                        mtd->oobsize = 32;
                        break;
                case 5:
                        mtd->oobsize = 16;
                        break;
                case 6:
                        mtd->oobsize = 640;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid OOB size");
                        break;
                }
        }
}

static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
                                                bool valid_jedecid)
{
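        /* The ECC requirement code sits in bits 6:4 of ID byte 4. */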
        u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

        if (valid_jedecid) {
                /* Reference: H27UCG8T2E datasheet */
                chip->ecc_step_ds = 1024;

                switch (ecc_level) {
                case 0:
                        chip->ecc_step_ds = 0;
                        chip->ecc_strength_ds = 0;
                        break;
                case 1:
                        chip->ecc_strength_ds = 4;
                        break;
                case 2:
                        chip->ecc_strength_ds = 24;
                        break;
                case 3:
                        chip->ecc_strength_ds = 32;
                        break;
                case 4:
                        chip->ecc_strength_ds = 40;
                        break;
                case 5:
                        chip->ecc_strength_ds = 50;
                        break;
                case 6:
                        chip->ecc_strength_ds = 60;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid ECC requirements");
                }
        } else {
                /*
                 * The ECC requirements field meaning depends on the
                 * NAND technology.
                 */
                u8 nand_tech = chip->id.data[5] & 0x3;

                if (nand_tech < 3) {
                        /* > 26nm, reference: H27UBG8T2A datasheet */
                        if (ecc_level < 5) {
                                chip->ecc_step_ds = 512;
                                chip->ecc_strength_ds = 1 << ecc_level;
                        } else if (ecc_level < 7) {
                                if (ecc_level == 5)
                                        chip->ecc_step_ds = 2048;
                                else
                                        chip->ecc_step_ds = 1024;
                                chip->ecc_strength_ds = 24;
                        } else {
                                /*
                                 * We should never reach this case, but if that
                                 * happens, this probably means Hynix decided
                                 * to use a different extended ID format, and
                                 * we should find a way to support it.
                                 */
                                WARN(1, "Invalid ECC requirements");
                        }
                } else {
                        /* <= 26nm, reference: H27UBG8T2B datasheet */
                        if (!ecc_level) {
                                chip->ecc_step_ds = 0;
                                chip->ecc_strength_ds = 0;
                        } else if (ecc_level < 5) {
                                chip->ecc_step_ds = 512;
                                chip->ecc_strength_ds = 1 << (ecc_level - 1);
                        } else {
                                chip->ecc_step_ds = 1024;
                                chip->ecc_strength_ds = 24 +
                                                        (8 * (ecc_level - 5));
                        }
                }
        }
}

static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
                                                       bool valid_jedecid)
{
        u8 nand_tech;

        /* We need scrambling on all TLC NANDs */
        if (chip->bits_per_cell > 2)
                chip->options |= NAND_NEED_SCRAMBLING;

        /* And on MLC NANDs with sub-3xnm process */
        if (valid_jedecid) {
                nand_tech = chip->id.data[5] >> 4;

                /* < 3xnm */
                if (nand_tech > 0)
                        chip->options |= NAND_NEED_SCRAMBLING;
        } else {
                nand_tech = chip->id.data[5] & 0x3;

                /* < 32nm */
                if (nand_tech > 2)
                        chip->options |= NAND_NEED_SCRAMBLING;
        }
}

static void hynix_nand_decode_id(struct nand_chip *chip)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        bool valid_jedecid;
        u8 tmp;

        /*
         * Exclude all SLC NANDs from this advanced detection scheme.
         * According to the ranges defined in several datasheets, it might
         * appear that even SLC NANDs could fall in this extended ID scheme.
         * If that is the case, rework the test to let SLC NANDs go through
         * the detection process.
         */
        if (chip->id.len < 6 || nand_is_slc(chip)) {
                nand_decode_ext_id(chip);
                return;
        }

        /* Extract pagesize */
        mtd->writesize = 2048 << (chip->id.data[3] & 0x03);
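        /* e.g. ID[3][1:0] == 0 selects 2 KiB pages, == 3 selects 16 KiB. */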

        tmp = (chip->id.data[3] >> 4) & 0x3;
        /*
         * When bit 7 is set we start counting at 1 MiB, otherwise we start
         * counting at 128 KiB, and in both cases we shift this base by the
         * content of ID[3][5:4].
         * The only exception is when ID[3][5:4] == 3 and ID[3][7] == 0: in
         * this case the erasesize is set to 768 KiB.
         */
        if (chip->id.data[3] & 0x80)
                mtd->erasesize = SZ_1M << tmp;
        else if (tmp == 3)
                mtd->erasesize = SZ_512K + SZ_256K;
        else
                mtd->erasesize = SZ_128K << tmp;
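        /*
         * e.g. ID[3][5:4] == 1 gives 256 KiB blocks when bit 7 is cleared
         * and 2 MiB blocks when bit 7 is set.
         */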

        /*
         * Modern Toggle DDR NANDs have a valid JEDECID even though they are
         * not exposing a valid JEDEC parameter table.
         * These NANDs use a different NAND ID scheme.
         */
        valid_jedecid = hynix_nand_has_valid_jedecid(chip);

        hynix_nand_extract_oobsize(chip, valid_jedecid);
        hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
        hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}

static void hynix_nand_cleanup(struct nand_chip *chip)
{
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);

        if (!hynix)
                return;

        kfree(hynix->read_retry);
        kfree(hynix);
        nand_set_manufacturer_data(chip, NULL);
}

static int hynix_nand_init(struct nand_chip *chip)
{
        struct hynix_nand *hynix;
        int ret;

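        /*
         * Hynix MLC parts are reportedly factory-marked in the last page of
         * a bad block, while SLC parts use the first and second pages, hence
         * the different BBT scan options (assumption based on common Hynix
         * datasheet conventions).
         */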
        if (!nand_is_slc(chip))
                chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
        else
                chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;

        hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
        if (!hynix)
                return -ENOMEM;

        nand_set_manufacturer_data(chip, hynix);

        ret = hynix_nand_rr_init(chip);
        if (ret)
                hynix_nand_cleanup(chip);

        return ret;
}

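/*
 * Hooks used by the NAND core once the manufacturer ID has been matched to
 * Hynix: ->detect() decodes the full ID string, ->init() performs
 * manufacturer-specific initialization after detection, and ->cleanup()
 * releases the private data (a sketch of the flow; see nand_base.c for the
 * exact call sites).
 */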
const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
        .detect = hynix_nand_decode_id,
        .init = hynix_nand_init,
        .cleanup = hynix_nand_cleanup,
};