1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/module.h>
18#include <linux/delay.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/partitions.h>
21#include <linux/mtd/rawnand.h>
22#include <linux/spi/spi.h>
23
24#include "mt29f_spinand.h"
25
26#define BUFSIZE (10 * 64 * 2048)
27#define CACHE_BUF 2112
28
29
30
31
32static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
33{
34 struct nand_chip *chip = mtd_to_nand(mtd);
35 struct spinand_info *info = nand_get_controller_data(chip);
36 struct spinand_state *state = info->priv;
37
38 return state;
39}
40
#ifdef CONFIG_MTD_SPINAND_ONDIEECC
/*
 * One-shot flags: set before a page program/read to arm on-die ECC for
 * that operation, cleared once the operation completes.
 * NOTE(review): file-scope state shared by all instances — looks unsafe
 * with more than one chip bound to this driver; confirm single-device use.
 */
static int enable_hw_ecc;
static int enable_read_hw_ecc;
44
45static int spinand_ooblayout_64_ecc(struct mtd_info *mtd, int section,
46 struct mtd_oob_region *oobregion)
47{
48 if (section > 3)
49 return -ERANGE;
50
51 oobregion->offset = (section * 16) + 1;
52 oobregion->length = 6;
53
54 return 0;
55}
56
57static int spinand_ooblayout_64_free(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
59{
60 if (section > 3)
61 return -ERANGE;
62
63 oobregion->offset = (section * 16) + 8;
64 oobregion->length = 8;
65
66 return 0;
67}
68
/* mtd OOB layout callbacks for parts with a 64-byte spare area. */
static const struct mtd_ooblayout_ops spinand_oob_64_ops = {
	.ecc = spinand_ooblayout_64_ecc,
	.free = spinand_ooblayout_64_free,
};
73#endif
74
75
76
77
78
79
80
81
/*
 * spinand_cmd - issue one SPI-NAND command as a single SPI message
 * @spi: the SPI device to talk to
 * @cmd: descriptor holding the opcode, optional address bytes, optional
 *       dummy byte count, and an optional data phase (tx OR rx)
 *
 * Builds up to four transfers in order: opcode, address, dummy, data.
 * Note that x[3] is shared between the tx and rx data phases, so a
 * command must not set both n_tx and n_rx (no caller in this file does).
 *
 * Returns 0 on success or the negative error code from spi_sync().
 */
static int spinand_cmd(struct spi_device *spi, struct spinand_cmd *cmd)
{
	struct spi_message message;
	struct spi_transfer x[4];
	u8 dummy = 0xff;

	spi_message_init(&message);
	memset(x, 0, sizeof(x));

	/* Transfer 0: the one-byte opcode, always present. */
	x[0].len = 1;
	x[0].tx_buf = &cmd->cmd;
	spi_message_add_tail(&x[0], &message);

	/* Transfer 1: address bytes, if the opcode takes any. */
	if (cmd->n_addr) {
		x[1].len = cmd->n_addr;
		x[1].tx_buf = cmd->addr;
		spi_message_add_tail(&x[1], &message);
	}

	/*
	 * Transfer 2: dummy filler.  NOTE(review): tx_buf points at a
	 * single 0xff byte even if n_dummy > 1; no caller here sets
	 * n_dummy, so this path is currently dead — verify before use.
	 */
	if (cmd->n_dummy) {
		x[2].len = cmd->n_dummy;
		x[2].tx_buf = &dummy;
		spi_message_add_tail(&x[2], &message);
	}

	/* Transfer 3: data written to the chip ... */
	if (cmd->n_tx) {
		x[3].len = cmd->n_tx;
		x[3].tx_buf = cmd->tx_buf;
		spi_message_add_tail(&x[3], &message);
	}

	/* ... or data read back (mutually exclusive with n_tx above). */
	if (cmd->n_rx) {
		x[3].len = cmd->n_rx;
		x[3].rx_buf = cmd->rx_buf;
		spi_message_add_tail(&x[3], &message);
	}

	return spi_sync(spi, &message);
}
121
122
123
124
125
126
127static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
128{
129 int retval;
130 u8 nand_id[3];
131 struct spinand_cmd cmd = {0};
132
133 cmd.cmd = CMD_READ_ID;
134 cmd.n_rx = 3;
135 cmd.rx_buf = &nand_id[0];
136
137 retval = spinand_cmd(spi_nand, &cmd);
138 if (retval < 0) {
139 dev_err(&spi_nand->dev, "error %d reading id\n", retval);
140 return retval;
141 }
142 id[0] = nand_id[1];
143 id[1] = nand_id[2];
144 return retval;
145}
146
147
148
149
150
151
152
153
154
155
156
157static int spinand_read_status(struct spi_device *spi_nand, u8 *status)
158{
159 struct spinand_cmd cmd = {0};
160 int ret;
161
162 cmd.cmd = CMD_READ_REG;
163 cmd.n_addr = 1;
164 cmd.addr[0] = REG_STATUS;
165 cmd.n_rx = 1;
166 cmd.rx_buf = status;
167
168 ret = spinand_cmd(spi_nand, &cmd);
169 if (ret < 0)
170 dev_err(&spi_nand->dev, "err: %d read status register\n", ret);
171
172 return ret;
173}
174
175#define MAX_WAIT_JIFFIES (40 * HZ)
176static int wait_till_ready(struct spi_device *spi_nand)
177{
178 unsigned long deadline;
179 int retval;
180 u8 stat = 0;
181
182 deadline = jiffies + MAX_WAIT_JIFFIES;
183 do {
184 retval = spinand_read_status(spi_nand, &stat);
185 if (retval < 0)
186 return -1;
187 if (!(stat & 0x1))
188 break;
189
190 cond_resched();
191 } while (!time_after_eq(jiffies, deadline));
192
193 if ((stat & 0x1) == 0)
194 return 0;
195
196 return -1;
197}
198
199
200
201
202
203
204
205
206static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp)
207{
208 struct spinand_cmd cmd = {0};
209 int retval;
210
211 cmd.cmd = CMD_READ_REG;
212 cmd.n_addr = 1;
213 cmd.addr[0] = REG_OTP;
214 cmd.n_rx = 1;
215 cmd.rx_buf = otp;
216
217 retval = spinand_cmd(spi_nand, &cmd);
218 if (retval < 0)
219 dev_err(&spi_nand->dev, "error %d get otp\n", retval);
220 return retval;
221}
222
223
224
225
226
227
228
229
230static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
231{
232 int retval;
233 struct spinand_cmd cmd = {0};
234
235 cmd.cmd = CMD_WRITE_REG;
236 cmd.n_addr = 1;
237 cmd.addr[0] = REG_OTP;
238 cmd.n_tx = 1;
239 cmd.tx_buf = otp;
240
241 retval = spinand_cmd(spi_nand, &cmd);
242 if (retval < 0)
243 dev_err(&spi_nand->dev, "error %d set otp\n", retval);
244
245 return retval;
246}
247
248#ifdef CONFIG_MTD_SPINAND_ONDIEECC
249
250
251
252
253
254
255
256static int spinand_enable_ecc(struct spi_device *spi_nand)
257{
258 int retval;
259 u8 otp = 0;
260
261 retval = spinand_get_otp(spi_nand, &otp);
262 if (retval < 0)
263 return retval;
264
265 if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK)
266 return 0;
267 otp |= OTP_ECC_MASK;
268 retval = spinand_set_otp(spi_nand, &otp);
269 if (retval < 0)
270 return retval;
271 return spinand_get_otp(spi_nand, &otp);
272}
273#endif
274
275static int spinand_disable_ecc(struct spi_device *spi_nand)
276{
277 int retval;
278 u8 otp = 0;
279
280 retval = spinand_get_otp(spi_nand, &otp);
281 if (retval < 0)
282 return retval;
283
284 if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
285 otp &= ~OTP_ECC_MASK;
286 retval = spinand_set_otp(spi_nand, &otp);
287 if (retval < 0)
288 return retval;
289 return spinand_get_otp(spi_nand, &otp);
290 }
291 return 0;
292}
293
294
295
296
297
298
299
300
301
302
303static int spinand_write_enable(struct spi_device *spi_nand)
304{
305 struct spinand_cmd cmd = {0};
306
307 cmd.cmd = CMD_WR_ENABLE;
308 return spinand_cmd(spi_nand, &cmd);
309}
310
/*
 * spinand_read_page_to_cache - load a NAND page into the chip's cache
 * @spi_nand: SPI device
 * @page_id: row (page) address to load
 *
 * Sends CMD_READ with a 3-byte row address; the page address occupies
 * the low two bytes and addr[0] stays zero (sufficient for the page
 * counts this driver addresses with a u16 page_id).
 */
static int spinand_read_page_to_cache(struct spi_device *spi_nand, u16 page_id)
{
	struct spinand_cmd cmd = {0};
	u16 row;

	row = page_id;
	cmd.cmd = CMD_READ;
	cmd.n_addr = 3;
	cmd.addr[1] = (u8)((row & 0xff00) >> 8);
	cmd.addr[2] = (u8)(row & 0x00ff);

	return spinand_cmd(spi_nand, &cmd);
}
324
325
326
327
328
329
330
331
332
/*
 * spinand_read_from_cache - read bytes out of the chip's page cache
 * @spi_nand: SPI device
 * @page_id: page previously loaded into the cache
 * @byte_id: starting column (byte offset) within the page
 * @len: number of bytes to read
 * @rbuf: destination buffer
 *
 * Issues CMD_READ_RDM with the 12-bit column in addr[0..1] and a
 * trailing 0xff filler byte.  Bit 6 of @page_id is folded into bit 4
 * of addr[0] — presumably a plane-select bit for this part; verify
 * against the MT29F datasheet.
 */
static int spinand_read_from_cache(struct spi_device *spi_nand, u16 page_id,
				   u16 byte_id, u16 len, u8 *rbuf)
{
	struct spinand_cmd cmd = {0};
	u16 column;

	column = byte_id;
	cmd.cmd = CMD_READ_RDM;
	cmd.n_addr = 3;
	cmd.addr[0] = (u8)((column & 0xff00) >> 8);
	cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
	cmd.addr[1] = (u8)(column & 0x00ff);
	cmd.addr[2] = (u8)(0xff);	/* dummy byte required by the opcode */
	cmd.n_dummy = 0;
	cmd.n_rx = len;
	cmd.rx_buf = rbuf;

	return spinand_cmd(spi_nand, &cmd);
}
352
353
354
355
356
357
358
359
360
361
362
363
/*
 * spinand_read_page - read part of a NAND page into memory
 * @spi_nand: SPI device
 * @page_id: page to read
 * @offset: starting byte within the page
 * @len: number of bytes to read
 * @rbuf: destination buffer
 *
 * Loads the page into the on-chip cache, polls status until the
 * operation completes, then streams @len bytes from the cache.
 * If the one-shot enable_read_hw_ecc flag is set, on-die ECC is
 * enabled for the read and disabled (and the flag cleared) afterwards.
 *
 * NOTE(review): on an uncorrectable ECC error this logs and returns 0
 * WITHOUT filling @rbuf, so the caller sees stale data as success —
 * confirm whether callers rely on this before changing it.
 */
static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
			     u16 offset, u16 len, u8 *rbuf)
{
	int ret;
	u8 status = 0;

#ifdef CONFIG_MTD_SPINAND_ONDIEECC
	if (enable_read_hw_ecc) {
		if (spinand_enable_ecc(spi_nand) < 0)
			dev_err(&spi_nand->dev, "enable HW ECC failed!");
	}
#endif
	ret = spinand_read_page_to_cache(spi_nand, page_id);
	if (ret < 0)
		return ret;

	if (wait_till_ready(spi_nand))
		dev_err(&spi_nand->dev, "WAIT timedout!!!\n");

	/* Poll until OIP clears; also surfaces the ECC result bits. */
	while (1) {
		ret = spinand_read_status(spi_nand, &status);
		if (ret < 0) {
			dev_err(&spi_nand->dev,
				"err %d read status register\n", ret);
			return ret;
		}

		if ((status & STATUS_OIP_MASK) == STATUS_READY) {
			if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
				dev_err(&spi_nand->dev, "ecc error, page=%d\n",
					page_id);
				return 0;
			}
			break;
		}
	}

	ret = spinand_read_from_cache(spi_nand, page_id, offset, len, rbuf);
	if (ret < 0) {
		dev_err(&spi_nand->dev, "read from cache failed!!\n");
		return ret;
	}

#ifdef CONFIG_MTD_SPINAND_ONDIEECC
	if (enable_read_hw_ecc) {
		ret = spinand_disable_ecc(spi_nand);
		if (ret < 0) {
			dev_err(&spi_nand->dev, "disable ecc failed!!\n");
			return ret;
		}
		enable_read_hw_ecc = 0;	/* one-shot flag consumed */
	}
#endif
	return ret;
}
419
420
421
422
423
424
425
426
427
428
429
430
/*
 * spinand_program_data_to_cache - load write data into the chip's cache
 * @spi_nand: SPI device
 * @page_id: target page (only bit 6 is used here, folded into addr[0])
 * @byte_id: starting column within the page
 * @len: number of bytes to send
 * @wbuf: data to program
 *
 * Issues CMD_PROG_PAGE_CLRCACHE with a 2-byte column address; as in
 * spinand_read_from_cache(), bit 6 of @page_id lands in bit 4 of
 * addr[0] — presumably plane select; verify against the datasheet.
 * The data only reaches the array after spinand_program_execute().
 */
static int spinand_program_data_to_cache(struct spi_device *spi_nand,
					 u16 page_id, u16 byte_id,
					 u16 len, u8 *wbuf)
{
	struct spinand_cmd cmd = {0};
	u16 column;

	column = byte_id;
	cmd.cmd = CMD_PROG_PAGE_CLRCACHE;
	cmd.n_addr = 2;
	cmd.addr[0] = (u8)((column & 0xff00) >> 8);
	cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
	cmd.addr[1] = (u8)(column & 0x00ff);
	cmd.n_tx = len;
	cmd.tx_buf = wbuf;

	return spinand_cmd(spi_nand, &cmd);
}
449
450
451
452
453
454
455
456
457
458
/*
 * spinand_program_execute - commit cached data to the NAND array
 * @spi_nand: SPI device
 * @page_id: row (page) address to program
 *
 * Sends CMD_PROG_PAGE_EXC with a 3-byte row address (addr[0] stays
 * zero for the u16 page range this driver uses).  The caller must
 * poll status for completion and P_FAIL afterwards.
 */
static int spinand_program_execute(struct spi_device *spi_nand, u16 page_id)
{
	struct spinand_cmd cmd = {0};
	u16 row;

	row = page_id;
	cmd.cmd = CMD_PROG_PAGE_EXC;
	cmd.n_addr = 3;
	cmd.addr[1] = (u8)((row & 0xff00) >> 8);
	cmd.addr[2] = (u8)(row & 0x00ff);

	return spinand_cmd(spi_nand, &cmd);
}
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486static int spinand_program_page(struct spi_device *spi_nand,
487 u16 page_id, u16 offset, u16 len, u8 *buf)
488{
489 int retval;
490 u8 status = 0;
491 u8 *wbuf;
492#ifdef CONFIG_MTD_SPINAND_ONDIEECC
493 unsigned int i, j;
494
495 wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL);
496 if (!wbuf)
497 return -ENOMEM;
498
499 enable_read_hw_ecc = 1;
500 retval = spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
501 if (retval < 0) {
502 dev_err(&spi_nand->dev, "ecc error on read page!!!\n");
503 return retval;
504 }
505
506 for (i = offset, j = 0; i < len; i++, j++)
507 wbuf[i] &= buf[j];
508
509 if (enable_hw_ecc) {
510 retval = spinand_enable_ecc(spi_nand);
511 if (retval < 0) {
512 dev_err(&spi_nand->dev, "enable ecc failed!!\n");
513 return retval;
514 }
515 }
516#else
517 wbuf = buf;
518#endif
519 retval = spinand_write_enable(spi_nand);
520 if (retval < 0) {
521 dev_err(&spi_nand->dev, "write enable failed!!\n");
522 return retval;
523 }
524 if (wait_till_ready(spi_nand))
525 dev_err(&spi_nand->dev, "wait timedout!!!\n");
526
527 retval = spinand_program_data_to_cache(spi_nand, page_id,
528 offset, len, wbuf);
529 if (retval < 0)
530 return retval;
531 retval = spinand_program_execute(spi_nand, page_id);
532 if (retval < 0)
533 return retval;
534 while (1) {
535 retval = spinand_read_status(spi_nand, &status);
536 if (retval < 0) {
537 dev_err(&spi_nand->dev,
538 "error %d reading status register\n", retval);
539 return retval;
540 }
541
542 if ((status & STATUS_OIP_MASK) == STATUS_READY) {
543 if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL) {
544 dev_err(&spi_nand->dev,
545 "program error, page %d\n", page_id);
546 return -1;
547 }
548 break;
549 }
550 }
551#ifdef CONFIG_MTD_SPINAND_ONDIEECC
552 if (enable_hw_ecc) {
553 retval = spinand_disable_ecc(spi_nand);
554 if (retval < 0) {
555 dev_err(&spi_nand->dev, "disable ecc failed!!\n");
556 return retval;
557 }
558 enable_hw_ecc = 0;
559 }
560#endif
561
562 return 0;
563}
564
565
566
567
568
569
570
571
572
573
/*
 * spinand_erase_block_erase - issue the block-erase command
 * @spi_nand: SPI device
 * @block_id: row address identifying the block to erase
 *
 * Sends CMD_ERASE_BLK with a 3-byte row address (addr[0] stays zero
 * for the u16 range used here).  The caller must have set the
 * write-enable latch and must poll status for completion/E_FAIL.
 */
static int spinand_erase_block_erase(struct spi_device *spi_nand, u16 block_id)
{
	struct spinand_cmd cmd = {0};
	u16 row;

	row = block_id;
	cmd.cmd = CMD_ERASE_BLK;
	cmd.n_addr = 3;
	cmd.addr[1] = (u8)((row & 0xff00) >> 8);
	cmd.addr[2] = (u8)(row & 0x00ff);

	return spinand_cmd(spi_nand, &cmd);
}
587
588
589
590
591
592
593
594
595
596
597
598
599static int spinand_erase_block(struct spi_device *spi_nand, u16 block_id)
600{
601 int retval;
602 u8 status = 0;
603
604 retval = spinand_write_enable(spi_nand);
605 if (wait_till_ready(spi_nand))
606 dev_err(&spi_nand->dev, "wait timedout!!!\n");
607
608 retval = spinand_erase_block_erase(spi_nand, block_id);
609 while (1) {
610 retval = spinand_read_status(spi_nand, &status);
611 if (retval < 0) {
612 dev_err(&spi_nand->dev,
613 "error %d reading status register\n", retval);
614 return retval;
615 }
616
617 if ((status & STATUS_OIP_MASK) == STATUS_READY) {
618 if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL) {
619 dev_err(&spi_nand->dev,
620 "erase error, block %d\n", block_id);
621 return -1;
622 }
623 break;
624 }
625 }
626 return 0;
627}
628
629#ifdef CONFIG_MTD_SPINAND_ONDIEECC
/*
 * spinand_write_page_hwecc - nand_chip ecc.write_page hook (on-die ECC)
 * @mtd: MTD device
 * @chip: NAND chip
 * @buf: page data to write
 * @oob_required: unused — OOB is handled by the on-die ECC engine
 * @page: unused here; the target page comes via the SEQIN/PAGEPROG path
 *
 * Arms the one-shot enable_hw_ecc flag (consumed later by
 * spinand_program_page()) and stages the data through write_buf.
 */
static int spinand_write_page_hwecc(struct mtd_info *mtd,
				    struct nand_chip *chip,
				    const u8 *buf, int oob_required,
				    int page)
{
	const u8 *p = buf;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;

	enable_hw_ecc = 1;
	chip->write_buf(mtd, p, eccsize * eccsteps);
	return 0;
}
643
/*
 * spinand_read_page_hwecc - nand_chip ecc.read_page hook (on-die ECC)
 * @mtd: MTD device
 * @chip: NAND chip
 * @buf: destination for the page data
 * @oob_required: when set, also copy the OOB area into chip->oob_poi
 * @page: unused here; the page was selected by the READ0 command path
 *
 * Arms the one-shot enable_read_hw_ecc flag, pulls the buffered data,
 * then polls the status register to account ECC results into
 * mtd->ecc_stats (failed / corrected).
 *
 * NOTE(review): the status poll has no timeout — if the chip never
 * reports READY this loops forever; confirm acceptable.
 */
static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				   u8 *buf, int oob_required, int page)
{
	int retval;
	u8 status;
	u8 *p = buf;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;
	struct spinand_info *info = nand_get_controller_data(chip);

	enable_read_hw_ecc = 1;

	chip->read_buf(mtd, p, eccsize * eccsteps);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	while (1) {
		retval = spinand_read_status(info->spi, &status);
		if (retval < 0) {
			dev_err(&mtd->dev,
				"error %d reading status register\n", retval);
			return retval;
		}

		if ((status & STATUS_OIP_MASK) == STATUS_READY) {
			if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
				pr_info("spinand: ECC error\n");
				mtd->ecc_stats.failed++;
			} else if ((status & STATUS_ECC_MASK) ==
					STATUS_ECC_1BIT_CORRECTED)
				mtd->ecc_stats.corrected++;
			break;
		}
	}
	return 0;
}
680#endif
681
/* No-op: chip select is driven by the SPI core on each message. */
static void spinand_select_chip(struct mtd_info *mtd, int dev)
{
}
685
686static u8 spinand_read_byte(struct mtd_info *mtd)
687{
688 struct spinand_state *state = mtd_to_state(mtd);
689 u8 data;
690
691 data = state->buf[state->buf_ptr];
692 state->buf_ptr++;
693 return data;
694}
695
/*
 * spinand_wait - nand_chip waitfunc hook: wait for the chip to go ready
 * @mtd: MTD device
 * @chip: NAND chip
 *
 * Polls the status register for up to 400 ms when erasing, 20 ms
 * otherwise, yielding between reads.
 *
 * NOTE(review): falls through to "return 0" on timeout as well, so
 * callers cannot distinguish timeout from ready — confirm intended.
 */
static int spinand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct spinand_info *info = nand_get_controller_data(chip);

	unsigned long timeo = jiffies;
	int retval, state = chip->state;
	u8 status;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;	/* 400 ms for erase */
	else
		timeo += (HZ * 20) / 1000;	/* 20 ms otherwise */

	while (time_before(jiffies, timeo)) {
		retval = spinand_read_status(info->spi, &status);
		if (retval < 0) {
			dev_err(&mtd->dev,
				"error %d reading status register\n", retval);
			return retval;
		}

		if ((status & STATUS_OIP_MASK) == STATUS_READY)
			return 0;

		cond_resched();
	}
	return 0;
}
724
725static void spinand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
726{
727 struct spinand_state *state = mtd_to_state(mtd);
728
729 memcpy(state->buf + state->buf_ptr, buf, len);
730 state->buf_ptr += len;
731}
732
733static void spinand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
734{
735 struct spinand_state *state = mtd_to_state(mtd);
736
737 memcpy(buf, state->buf + state->buf_ptr, len);
738 state->buf_ptr += len;
739}
740
741
742
743
/*
 * spinand_reset - issue a software reset to the chip
 * @spi_nand: SPI device
 *
 * Sends CMD_RESET, sleeps briefly to let the reset take effect, then
 * waits for the chip to report ready.  Failures are logged only.
 */
static void spinand_reset(struct spi_device *spi_nand)
{
	struct spinand_cmd cmd = {0};

	cmd.cmd = CMD_RESET;

	if (spinand_cmd(spi_nand, &cmd) < 0)
		pr_info("spinand reset failed!\n");

	/* Give the chip time to come out of reset before polling. */
	usleep_range(1000, 2000);

	if (wait_till_ready(spi_nand))
		dev_err(&spi_nand->dev, "wait timedout!\n");
}
759
/*
 * spinand_cmdfunc - nand_chip cmdfunc hook: map raw NAND commands to
 * SPI-NAND operations
 * @mtd: MTD device
 * @command: NAND_CMD_* opcode from the core
 * @column: column argument (byte offset) where applicable
 * @page: page/row argument where applicable
 *
 * Reads stage data into state->buf for the read_byte/read_buf hooks;
 * writes accumulate in state->buf via SEQIN + write_buf and are
 * committed on PAGEPROG.  The 0x840/0x800/0x40 constants assume a
 * 2048-byte page with a 64-byte OOB (0x840 == CACHE_BUF == 2112).
 */
static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command,
			    int column, int page)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct spinand_info *info = nand_get_controller_data(chip);
	struct spinand_state *state = info->priv;

	switch (command) {
	/* Full page read: data + OOB (0x840 bytes) into the staging buf. */
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		state->buf_ptr = 0;
		spinand_read_page(info->spi, page, 0x0, 0x840, state->buf);
		break;
	/* OOB-only read: 0x40 bytes starting at column 0x800. */
	case NAND_CMD_READOOB:
		state->buf_ptr = 0;
		spinand_read_page(info->spi, page, 0x800, 0x40, state->buf);
		break;
	/* Random output: just reposition within the already-read page. */
	case NAND_CMD_RNDOUT:
		state->buf_ptr = column;
		break;
	case NAND_CMD_READID:
		state->buf_ptr = 0;
		spinand_read_id(info->spi, state->buf);
		break;
	/* ONFI parameter page is not supported; leave the buffer empty. */
	case NAND_CMD_PARAM:
		state->buf_ptr = 0;
		break;
	/* ERASE1 carries the block address; ERASE2 is a no-op here. */
	case NAND_CMD_ERASE1:
		spinand_erase_block(info->spi, page);
		break;

	case NAND_CMD_ERASE2:
		break;
	/* SEQIN records the target; data follows via write_buf. */
	case NAND_CMD_SEQIN:
		state->col = column;
		state->row = page;
		state->buf_ptr = 0;
		break;
	/* PAGEPROG flushes everything staged since SEQIN to the chip. */
	case NAND_CMD_PAGEPROG:
		spinand_program_page(info->spi, state->row, state->col,
				     state->buf_ptr, state->buf);
		break;
	/*
	 * STATUS: the core expects a NAND-style status byte; force the
	 * write-protect-off bit (0x80) so the core doesn't think the
	 * chip is protected.
	 */
	case NAND_CMD_STATUS:
		spinand_get_otp(info->spi, state->buf);
		if (!(state->buf[0] & 0x80))
			state->buf[0] = 0x80;
		state->buf_ptr = 0;
		break;
	/* RESET: drain any in-flight operation, then soft-reset. */
	case NAND_CMD_RESET:
		if (wait_till_ready(info->spi))
			dev_err(&info->spi->dev, "WAIT timedout!!!\n");
		/* a minimum of 250us must elapse before issuing RESET cmd */
		usleep_range(250, 1000);
		spinand_reset(info->spi);
		break;
	default:
		dev_err(&mtd->dev, "Unknown CMD: 0x%x\n", command);
	}
}
827
828
829
830
831
832
833
834
835static int spinand_lock_block(struct spi_device *spi_nand, u8 lock)
836{
837 struct spinand_cmd cmd = {0};
838 int ret;
839 u8 otp = 0;
840
841 ret = spinand_get_otp(spi_nand, &otp);
842
843 cmd.cmd = CMD_WRITE_REG;
844 cmd.n_addr = 1;
845 cmd.addr[0] = REG_BLOCK_LOCK;
846 cmd.n_tx = 1;
847 cmd.tx_buf = &lock;
848
849 ret = spinand_cmd(spi_nand, &cmd);
850 if (ret < 0)
851 dev_err(&spi_nand->dev, "error %d lock block\n", ret);
852
853 return ret;
854}
855
856
857
858
859
860
861
862
/*
 * spinand_probe - bind the driver to a SPI device
 * @spi_nand: the SPI device being probed
 *
 * Allocates the driver state (all devm-managed), unlocks the whole
 * flash, wires the nand_chip callbacks, selects on-die or software
 * ECC per CONFIG_MTD_SPINAND_ONDIEECC, scans for the chip, and
 * registers the MTD device.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENXIO if the
 * NAND scan fails, or the error from mtd_device_register().
 */
static int spinand_probe(struct spi_device *spi_nand)
{
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct spinand_info *info;
	struct spinand_state *state;

	info = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->spi = spi_nand;

	/* Clear all lock bits so the whole array is writable. */
	spinand_lock_block(spi_nand, BL_ALL_UNLOCKED);

	state = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_state),
			     GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	info->priv = state;
	state->buf_ptr = 0;
	/* Staging buffer shared by the cmdfunc/read_buf/write_buf hooks. */
	state->buf = devm_kzalloc(&spi_nand->dev, BUFSIZE, GFP_KERNEL);
	if (!state->buf)
		return -ENOMEM;

	chip = devm_kzalloc(&spi_nand->dev, sizeof(struct nand_chip),
			    GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

#ifdef CONFIG_MTD_SPINAND_ONDIEECC
	/* On-die ECC: 4 steps of 512 data / 6 ECC bytes, 1-bit strength. */
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 0x200;
	chip->ecc.bytes = 0x6;
	chip->ecc.steps = 0x4;

	chip->ecc.strength = 1;
	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
	chip->ecc.read_page = spinand_read_page_hwecc;
	chip->ecc.write_page = spinand_write_page_hwecc;
#else
	/* Software ECC: make sure the chip's own engine stays off. */
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;
	if (spinand_disable_ecc(spi_nand) < 0)
		dev_info(&spi_nand->dev, "%s: disable ecc failed!\n",
			 __func__);
#endif

	nand_set_flash_node(chip, spi_nand->dev.of_node);
	nand_set_controller_data(chip, info);
	chip->read_buf = spinand_read_buf;
	chip->write_buf = spinand_write_buf;
	chip->read_byte = spinand_read_byte;
	chip->cmdfunc = spinand_cmdfunc;
	chip->waitfunc = spinand_wait;
	chip->options |= NAND_CACHEPRG;
	chip->select_chip = spinand_select_chip;
	chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
	chip->onfi_get_features = nand_onfi_get_set_features_notsupp;

	mtd = nand_to_mtd(chip);

	dev_set_drvdata(&spi_nand->dev, mtd);

	mtd->dev.parent = &spi_nand->dev;
	mtd->oobsize = 64;
#ifdef CONFIG_MTD_SPINAND_ONDIEECC
	mtd_set_ooblayout(mtd, &spinand_oob_64_ops);
#endif

	if (nand_scan(mtd, 1))
		return -ENXIO;

	return mtd_device_register(mtd, NULL, 0);
}
940
941
942
943
944
945
946
947
948static int spinand_remove(struct spi_device *spi)
949{
950 mtd_device_unregister(dev_get_drvdata(&spi->dev));
951
952 return 0;
953}
954
/* Device-tree match table. */
static const struct of_device_id spinand_dt[] = {
	{ .compatible = "spinand,mt29f", },
	{}
};
MODULE_DEVICE_TABLE(of, spinand_dt);
960
961
962
963
/* SPI driver glue for the MT29F SPI-NAND. */
static struct spi_driver spinand_driver = {
	.driver = {
		.name		= "mt29f",
		.of_match_table	= spinand_dt,
	},
	.probe		= spinand_probe,
	.remove		= spinand_remove,
};
972
973module_spi_driver(spinand_driver);
974
975MODULE_DESCRIPTION("SPI NAND driver for Micron");
976MODULE_AUTHOR("Henry Pan <hspan@micron.com>, Kamlakant Patel <kamlakant.patel@broadcom.com>");
977MODULE_LICENSE("GPL v2");
978