linux/drivers/mtd/nand/raw/nand_hynix.c
/*
 * Copyright (C) 2017 Free Electrons
 * Copyright (C) 2017 NextThing Co
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#define NAND_HYNIX_CMD_SET_PARAMS       0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS     0x16

#define NAND_HYNIX_1XNM_RR_REPEAT       8

/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to set when applying a new read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: array of values to set in the registers. The array size is equal
 *          to (nregs * nmodes); the values for retry mode m start at
 *          values[m * nregs]
 */
struct hynix_read_retry {
        int nregs;
        const u8 *regs;
        u8 values[];
};

/**
 * struct hynix_nand - private Hynix NAND struct
 * @read_retry: read-retry information
 */
struct hynix_nand {
        const struct hynix_read_retry *read_retry;
};

/**
 * struct hynix_read_retry_otp - description of the read-retry OTP area
 * @nregs: number of Hynix private registers to set before reading the OTP
 *         area
 * @regs: registers that should be configured
 * @values: values that should be set in regs
 * @page: the address to pass to the READ_PAGE command. Depends on the NAND
 *        chip
 * @size: size of the read-retry OTP section
 */
struct hynix_read_retry_otp {
        int nregs;
        const u8 *regs;
        const u8 *values;
        int page;
        int size;
};

static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
        u8 jedecid[5] = { };
        int ret;

        ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
        if (ret)
                return false;

        return !strncmp("JEDEC", jedecid, sizeof(jedecid));
}

static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        if (chip->exec_op) {
                struct nand_op_instr instrs[] = {
                        NAND_OP_CMD(cmd, 0),
                };
                struct nand_operation op = NAND_OPERATION(instrs);

                return nand_exec_op(chip, &op);
        }

        chip->cmdfunc(mtd, cmd, -1, -1);

        return 0;
}

static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        u16 column = ((u16)addr << 8) | addr;

        if (chip->exec_op) {
                struct nand_op_instr instrs[] = {
                        NAND_OP_ADDR(1, &addr, 0),
                        NAND_OP_8BIT_DATA_OUT(1, &val, 0),
                };
                struct nand_operation op = NAND_OPERATION(instrs);

                return nand_exec_op(chip, &op);
        }

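        /*
         * Legacy path: 'column' replicates the register address in both
         * bytes, presumably so the right value is sent regardless of how
         * many column address cycles ->cmdfunc() issues.
         */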
        chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
        chip->write_byte(mtd, val);

        return 0;
}

static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
        const u8 *values;
        int i, ret;

        values = hynix->read_retry->values +
                 (retry_mode * hynix->read_retry->nregs);

        /* Enter 'Set Hynix Parameters' mode */
        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
        if (ret)
                return ret;

        /*
         * Configure the NAND in the requested read-retry mode.
         * This is done by setting pre-defined values in internal NAND
         * registers.
         *
         * The set of registers is NAND specific, and the values are either
         * predefined or extracted from an OTP area on the NAND (values are
         * probably tweaked at production in this case).
         */
        for (i = 0; i < hynix->read_retry->nregs; i++) {
                ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
                                              values[i]);
                if (ret)
                        return ret;
        }

        /* Apply the new settings. */
        return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}

/**
 * hynix_get_majority - get the value that occurs most often in a given
 *                      set of values
 * @in: the array of values to test
 * @repeat: the size of the in array
 * @out: pointer used to store the output value
 *
 * This function implements the 'majority check' logic that is supposed to
 * overcome the unreliability of MLC NANDs when reading the OTP area storing
 * the read-retry parameters.
 *
 * It's based on a pretty simple assumption: if we repeat the same value
 * several times and then take the one that occurs most often, we should
 * find the correct value.
 * Let's hope this dummy algorithm prevents us from losing the read-retry
 * parameters.
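 *
 * For example, with repeat = 8 and in = { 0xa5, 0xa5, 0x3c, 0xa5, 0xa5,
 * 0xff, 0xa5, 0xa5 }, 0xa5 occurs more than repeat / 2 times and is stored
 * in *out.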
 */
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
        int i, j, half = repeat / 2;

        /*
         * We only test the first half of the in array because we must ensure
         * that the value is at least occurring repeat / 2 times.
         *
         * This loop is suboptimal since we may count the occurrences of the
         * same value several times, but we are doing that on small sets,
         * which makes it acceptable.
         */
        for (i = 0; i < half; i++) {
                int cnt = 0;
                u8 val = in[i];

                /* Count all values that are matching the one at index i. */
                for (j = i + 1; j < repeat; j++) {
                        if (in[j] == val)
                                cnt++;
                }

                /* We found a value occurring more than repeat / 2 times. */
                if (cnt > half) {
                        *out = val;
                        return 0;
                }
        }

        return -EIO;
}

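/*
 * Read the read-retry OTP area described by the given hynix_read_retry_otp
 * entry: reset the chip, program the Hynix-private registers listed in the
 * entry, issue the 0x17/0x04/0x19 command sequence (presumably entering OTP
 * mode), read the OTP page, and finally reset the chip and clear register
 * 0x38 to get back to normal operation.
 */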
static int hynix_read_rr_otp(struct nand_chip *chip,
                             const struct hynix_read_retry_otp *info,
                             void *buf)
{
        int i, ret;

        ret = nand_reset_op(chip);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
        if (ret)
                return ret;

        for (i = 0; i < info->nregs; i++) {
                ret = hynix_nand_reg_write_op(chip, info->regs[i],
                                              info->values[i]);
                if (ret)
                        return ret;
        }

        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
        if (ret)
                return ret;

        /* Sequence to enter OTP mode? */
        ret = hynix_nand_cmd_op(chip, 0x17);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, 0x4);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, 0x19);
        if (ret)
                return ret;

        /* Now read the page */
        ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
        if (ret)
                return ret;

        /* Put everything back to normal */
        ret = nand_reset_op(chip);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
        if (ret)
                return ret;

        ret = hynix_nand_reg_write_op(chip, 0x38, 0);
        if (ret)
                return ret;

        ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
        if (ret)
                return ret;

        return nand_read_page_op(chip, 0, 0, NULL, 0);
}

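/*
 * Layout of the read-retry OTP page as interpreted by the code below (every
 * value is repeated NAND_HYNIX_1XNM_RR_REPEAT times so that a majority check
 * can be applied):
 * - bytes 0-7: number of read-retry modes
 * - bytes 8-15: number of registers per mode
 * - from byte 16: NAND_HYNIX_1XNM_RR_REPEAT copies of the register value
 *   sets, each copy stored in normal then bit-inverted form, every set
 *   being (nmodes * nregs) bytes long
 */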
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS                           0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS                       8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv)            \
        (16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))

static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
                                   int mode, int reg, bool inv, u8 *val)
{
        u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
        int val_offs = (mode * nregs) + reg;
        int set_size = nmodes * nregs;
        int i, ret;

        for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
                int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

                tmp[i] = buf[val_offs + set_offs];
        }

        ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
        if (ret)
                return ret;

        if (inv)
                *val = ~*val;

        return 0;
}

static u8 hynix_1xnm_mlc_read_retry_regs[] = {
        0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};

static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
                                  const struct hynix_read_retry_otp *info)
{
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
        struct hynix_read_retry *rr = NULL;
        int ret, i, j;
        u8 nregs, nmodes;
        u8 *buf;

        buf = kmalloc(info->size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = hynix_read_rr_otp(chip, info, buf);
        if (ret)
                goto out;

        ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
                                 &nmodes);
        if (ret)
                goto out;

        ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
                                 NAND_HYNIX_1XNM_RR_REPEAT,
                                 &nregs);
        if (ret)
                goto out;

        rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
        if (!rr) {
                ret = -ENOMEM;
                goto out;
        }

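        /*
         * Extract each register value with a majority check: try the
         * non-inverted copies first and fall back to the bit-inverted
         * copies if no consensus is reached.
         */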
        for (i = 0; i < nmodes; i++) {
                for (j = 0; j < nregs; j++) {
                        u8 *val = rr->values + (i * nregs) + j;

                        ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
                                                      false, val);
                        if (!ret)
                                continue;

                        ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
                                                      true, val);
                        if (ret)
                                goto out;
                }
        }

        rr->nregs = nregs;
        rr->regs = hynix_1xnm_mlc_read_retry_regs;
        hynix->read_retry = rr;
        chip->setup_read_retry = hynix_nand_setup_read_retry;
        chip->read_retries = nmodes;

out:
        kfree(buf);

        if (ret)
                kfree(rr);

        return ret;
}

static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
        {
                .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
                .regs = hynix_mlc_1xnm_rr_otp_regs,
                .values = hynix_mlc_1xnm_rr_otp_values,
                .page = 0x21f,
                .size = 784,
        },
        {
                .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
                .regs = hynix_mlc_1xnm_rr_otp_regs,
                .values = hynix_mlc_1xnm_rr_otp_values,
                .page = 0x200,
                .size = 528,
        },
};

static int hynix_nand_rr_init(struct nand_chip *chip)
{
        int i, ret = 0;
        bool valid_jedecid;

        valid_jedecid = hynix_nand_has_valid_jedecid(chip);

        /*
         * We only support read-retry for 1xnm NANDs, and those NANDs all
         * expose a valid JEDEC ID.
         */
        if (valid_jedecid) {
                u8 nand_tech = chip->id.data[5] >> 4;

                /* 1xnm technology */
                if (nand_tech == 4) {
                        for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
                             i++) {
                                /*
                                 * FIXME: Hynix recommends copying the
                                 * read-retry OTP area into a normal page.
                                 */
                                ret = hynix_mlc_1xnm_rr_init(chip,
                                                &hynix_mlc_1xnm_rr_otps[i]);
                                if (!ret)
                                        break;
                        }
                }
        }

        if (ret)
                pr_warn("failed to initialize read-retry infrastructure\n");

        return 0;
}

static void hynix_nand_extract_oobsize(struct nand_chip *chip,
                                       bool valid_jedecid)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        u8 oobsize;

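        /* The OOB size is encoded in bits 2-3 and 6 of the 4th ID byte. */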
        oobsize = ((chip->id.data[3] >> 2) & 0x3) |
                  ((chip->id.data[3] >> 4) & 0x4);

        if (valid_jedecid) {
                switch (oobsize) {
                case 0:
                        mtd->oobsize = 2048;
                        break;
                case 1:
                        mtd->oobsize = 1664;
                        break;
                case 2:
                        mtd->oobsize = 1024;
                        break;
                case 3:
                        mtd->oobsize = 640;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid OOB size");
                        break;
                }
        } else {
                switch (oobsize) {
                case 0:
                        mtd->oobsize = 128;
                        break;
                case 1:
                        mtd->oobsize = 224;
                        break;
                case 2:
                        mtd->oobsize = 448;
                        break;
                case 3:
                        mtd->oobsize = 64;
                        break;
                case 4:
                        mtd->oobsize = 32;
                        break;
                case 5:
                        mtd->oobsize = 16;
                        break;
                case 6:
                        mtd->oobsize = 640;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid OOB size");
                        break;
                }

                /*
                 * The datasheet of H27UCG8T2BTR mentions that the "Redundant
                 * Area Size" is encoded "per 8KB" (page size). This chip uses
                 * a page size of 16KiB. The datasheet mentions an OOB size of
                 * 1,280 bytes, but the OOB size encoded in the ID bytes (using
                 * the existing logic above) is 640 bytes.
                 * Update the OOB size for this chip by taking the value
                 * determined above and scaling it to the actual page size (so
                 * the actual OOB size for this chip is: 640 * 16k / 8k).
                 */
                if (chip->id.data[1] == 0xde)
                        mtd->oobsize *= mtd->writesize / SZ_8K;
        }
}

static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
                                                bool valid_jedecid)
{
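        /* The ECC requirements are encoded in bits 4-6 of the 5th ID byte. */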
        u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

        if (valid_jedecid) {
                /* Reference: H27UCG8T2E datasheet */
                chip->ecc_step_ds = 1024;

                switch (ecc_level) {
                case 0:
                        chip->ecc_step_ds = 0;
                        chip->ecc_strength_ds = 0;
                        break;
                case 1:
                        chip->ecc_strength_ds = 4;
                        break;
                case 2:
                        chip->ecc_strength_ds = 24;
                        break;
                case 3:
                        chip->ecc_strength_ds = 32;
                        break;
                case 4:
                        chip->ecc_strength_ds = 40;
                        break;
                case 5:
                        chip->ecc_strength_ds = 50;
                        break;
                case 6:
                        chip->ecc_strength_ds = 60;
                        break;
                default:
                        /*
                         * We should never reach this case, but if that
                         * happens, this probably means Hynix decided to use
                         * a different extended ID format, and we should find
                         * a way to support it.
                         */
                        WARN(1, "Invalid ECC requirements");
                }
        } else {
                /*
                 * The meaning of the ECC requirements field depends on the
                 * NAND technology.
                 */
                u8 nand_tech = chip->id.data[5] & 0x7;

                if (nand_tech < 3) {
                        /* > 26nm, reference: H27UBG8T2A datasheet */
                        if (ecc_level < 5) {
                                chip->ecc_step_ds = 512;
                                chip->ecc_strength_ds = 1 << ecc_level;
                        } else if (ecc_level < 7) {
                                if (ecc_level == 5)
                                        chip->ecc_step_ds = 2048;
                                else
                                        chip->ecc_step_ds = 1024;
                                chip->ecc_strength_ds = 24;
                        } else {
                                /*
                                 * We should never reach this case, but if that
                                 * happens, this probably means Hynix decided
                                 * to use a different extended ID format, and
                                 * we should find a way to support it.
                                 */
                                WARN(1, "Invalid ECC requirements");
                        }
                } else {
                        /* <= 26nm, reference: H27UBG8T2B datasheet */
                        if (!ecc_level) {
                                chip->ecc_step_ds = 0;
                                chip->ecc_strength_ds = 0;
                        } else if (ecc_level < 5) {
                                chip->ecc_step_ds = 512;
                                chip->ecc_strength_ds = 1 << (ecc_level - 1);
                        } else {
                                chip->ecc_step_ds = 1024;
                                chip->ecc_strength_ds = 24 +
                                                        (8 * (ecc_level - 5));
                        }
                }
        }
}

static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
                                                       bool valid_jedecid)
{
        u8 nand_tech;

        /* We need scrambling on all TLC NANDs */
        if (chip->bits_per_cell > 2)
                chip->options |= NAND_NEED_SCRAMBLING;

        /* And on MLC NANDs with sub-3xnm process */
        if (valid_jedecid) {
                nand_tech = chip->id.data[5] >> 4;

                /* < 3xnm */
                if (nand_tech > 0)
                        chip->options |= NAND_NEED_SCRAMBLING;
        } else {
                nand_tech = chip->id.data[5] & 0x7;

                /* < 32nm */
                if (nand_tech > 2)
                        chip->options |= NAND_NEED_SCRAMBLING;
        }
}

static void hynix_nand_decode_id(struct nand_chip *chip)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        bool valid_jedecid;
        u8 tmp;

        /*
         * Exclude all SLC NANDs from this advanced detection scheme.
         * According to the ranges defined in several datasheets, it might
         * appear that even SLC NANDs could fall in this extended ID scheme.
         * If that is the case, rework the test to let SLC NANDs go through
         * the detection process.
         */
        if (chip->id.len < 6 || nand_is_slc(chip)) {
                nand_decode_ext_id(chip);
                return;
        }

        /* Extract pagesize */
        mtd->writesize = 2048 << (chip->id.data[3] & 0x03);

        tmp = (chip->id.data[3] >> 4) & 0x3;
        /*
         * When bit7 is set we start counting at 1MiB, otherwise we start
         * counting at 128KiB and shift this value by the content of
         * ID[3][4:5].
         * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0: in
         * this case the erasesize is set to 768KiB.
         */
        if (chip->id.data[3] & 0x80)
                mtd->erasesize = SZ_1M << tmp;
        else if (tmp == 3)
                mtd->erasesize = SZ_512K + SZ_256K;
        else
                mtd->erasesize = SZ_128K << tmp;

        /*
         * Modern Toggle DDR NANDs have a valid JEDEC ID even though they do
         * not expose a valid JEDEC parameter table.
         * These NANDs use a different NAND ID scheme.
         */
        valid_jedecid = hynix_nand_has_valid_jedecid(chip);

        hynix_nand_extract_oobsize(chip, valid_jedecid);
        hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
        hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}

static void hynix_nand_cleanup(struct nand_chip *chip)
{
        struct hynix_nand *hynix = nand_get_manufacturer_data(chip);

        if (!hynix)
                return;

        kfree(hynix->read_retry);
        kfree(hynix);
        nand_set_manufacturer_data(chip, NULL);
}

static int hynix_nand_init(struct nand_chip *chip)
{
        struct hynix_nand *hynix;
        int ret;

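        /*
         * Scan the last page of each block for the bad-block marker on
         * MLC/TLC parts, and the first two pages on SLC parts.
         */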
        if (!nand_is_slc(chip))
                chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
        else
                chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;

        hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
        if (!hynix)
                return -ENOMEM;

        nand_set_manufacturer_data(chip, hynix);

        ret = hynix_nand_rr_init(chip);
        if (ret)
                hynix_nand_cleanup(chip);

        return ret;
}

const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
        .detect = hynix_nand_decode_id,
        .init = hynix_nand_init,
        .cleanup = hynix_nand_cleanup,
};