1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/module.h>
18#include <linux/delay.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/partitions.h>
21#include <linux/mtd/nand.h>
22#include <linux/spi/spi.h>
23
24#include "mt29f_spinand.h"
25
/* Staging buffer for MTD command emulation: presumably sized for one
 * erase-block worth of traffic (10 ? x 64 pages x 2048 bytes) — TODO confirm
 * why the factor of 10 is needed.
 */
#define BUFSIZE (10 * 64 * 2048)
/* One full page incl. spare: 2048 data + 64 OOB = 2112 bytes */
#define CACHE_BUF 2112
28
29
30
31
32static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
33{
34 struct nand_chip *chip = mtd_to_nand(mtd);
35 struct spinand_info *info = nand_get_controller_data(chip);
36 struct spinand_state *state = info->priv;
37
38 return state;
39}
40
#ifdef CONFIG_MTD_SPINAND_ONDIEECC
/* Handshake flags between the ecc.read_page/write_page hooks and the
 * low-level page routines: set by spinand_read_page_hwecc() /
 * spinand_write_page_hwecc(), consumed and cleared by spinand_read_page()
 * and spinand_program_page().
 * NOTE(review): plain file-scope ints shared across operations — not safe
 * if two MTD operations ever run concurrently on different chips.
 */
static int enable_hw_ecc;
static int enable_read_hw_ecc;

/* OOB layout for a 64-byte spare area: 6 ECC bytes per 512-byte step,
 * 4 steps, with 8 free bytes after each ECC group.
 */
static struct nand_ecclayout spinand_oob_64 = {
	.eccbytes = 24,
	.eccpos = {
		1, 2, 3, 4, 5, 6,
		17, 18, 19, 20, 21, 22,
		33, 34, 35, 36, 37, 38,
		49, 50, 51, 52, 53, 54, },
	.oobfree = {
		{.offset = 8,
		 .length = 8},
		{.offset = 24,
		 .length = 8},
		{.offset = 40,
		 .length = 8},
		{.offset = 56,
		 .length = 8},
	}
};
#endif
64
65
66
67
68
69
70
71
72static int spinand_cmd(struct spi_device *spi, struct spinand_cmd *cmd)
73{
74 struct spi_message message;
75 struct spi_transfer x[4];
76 u8 dummy = 0xff;
77
78 spi_message_init(&message);
79 memset(x, 0, sizeof(x));
80
81 x[0].len = 1;
82 x[0].tx_buf = &cmd->cmd;
83 spi_message_add_tail(&x[0], &message);
84
85 if (cmd->n_addr) {
86 x[1].len = cmd->n_addr;
87 x[1].tx_buf = cmd->addr;
88 spi_message_add_tail(&x[1], &message);
89 }
90
91 if (cmd->n_dummy) {
92 x[2].len = cmd->n_dummy;
93 x[2].tx_buf = &dummy;
94 spi_message_add_tail(&x[2], &message);
95 }
96
97 if (cmd->n_tx) {
98 x[3].len = cmd->n_tx;
99 x[3].tx_buf = cmd->tx_buf;
100 spi_message_add_tail(&x[3], &message);
101 }
102
103 if (cmd->n_rx) {
104 x[3].len = cmd->n_rx;
105 x[3].rx_buf = cmd->rx_buf;
106 spi_message_add_tail(&x[3], &message);
107 }
108
109 return spi_sync(spi, &message);
110}
111
112
113
114
115
116
117static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
118{
119 int retval;
120 u8 nand_id[3];
121 struct spinand_cmd cmd = {0};
122
123 cmd.cmd = CMD_READ_ID;
124 cmd.n_rx = 3;
125 cmd.rx_buf = &nand_id[0];
126
127 retval = spinand_cmd(spi_nand, &cmd);
128 if (retval < 0) {
129 dev_err(&spi_nand->dev, "error %d reading id\n", retval);
130 return retval;
131 }
132 id[0] = nand_id[1];
133 id[1] = nand_id[2];
134 return retval;
135}
136
137
138
139
140
141
142
143
144
145
146
147static int spinand_read_status(struct spi_device *spi_nand, u8 *status)
148{
149 struct spinand_cmd cmd = {0};
150 int ret;
151
152 cmd.cmd = CMD_READ_REG;
153 cmd.n_addr = 1;
154 cmd.addr[0] = REG_STATUS;
155 cmd.n_rx = 1;
156 cmd.rx_buf = status;
157
158 ret = spinand_cmd(spi_nand, &cmd);
159 if (ret < 0)
160 dev_err(&spi_nand->dev, "err: %d read status register\n", ret);
161
162 return ret;
163}
164
165#define MAX_WAIT_JIFFIES (40 * HZ)
166static int wait_till_ready(struct spi_device *spi_nand)
167{
168 unsigned long deadline;
169 int retval;
170 u8 stat = 0;
171
172 deadline = jiffies + MAX_WAIT_JIFFIES;
173 do {
174 retval = spinand_read_status(spi_nand, &stat);
175 if (retval < 0)
176 return -1;
177 if (!(stat & 0x1))
178 break;
179
180 cond_resched();
181 } while (!time_after_eq(jiffies, deadline));
182
183 if ((stat & 0x1) == 0)
184 return 0;
185
186 return -1;
187}
188
189
190
191
192
193
194
195
196static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp)
197{
198 struct spinand_cmd cmd = {0};
199 int retval;
200
201 cmd.cmd = CMD_READ_REG;
202 cmd.n_addr = 1;
203 cmd.addr[0] = REG_OTP;
204 cmd.n_rx = 1;
205 cmd.rx_buf = otp;
206
207 retval = spinand_cmd(spi_nand, &cmd);
208 if (retval < 0)
209 dev_err(&spi_nand->dev, "error %d get otp\n", retval);
210 return retval;
211}
212
213
214
215
216
217
218
219
220static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
221{
222 int retval;
223 struct spinand_cmd cmd = {0};
224
225 cmd.cmd = CMD_WRITE_REG;
226 cmd.n_addr = 1;
227 cmd.addr[0] = REG_OTP;
228 cmd.n_tx = 1;
229 cmd.tx_buf = otp;
230
231 retval = spinand_cmd(spi_nand, &cmd);
232 if (retval < 0)
233 dev_err(&spi_nand->dev, "error %d set otp\n", retval);
234
235 return retval;
236}
237
238#ifdef CONFIG_MTD_SPINAND_ONDIEECC
239
240
241
242
243
244
245
246static int spinand_enable_ecc(struct spi_device *spi_nand)
247{
248 int retval;
249 u8 otp = 0;
250
251 retval = spinand_get_otp(spi_nand, &otp);
252 if (retval < 0)
253 return retval;
254
255 if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK)
256 return 0;
257 otp |= OTP_ECC_MASK;
258 retval = spinand_set_otp(spi_nand, &otp);
259 if (retval < 0)
260 return retval;
261 return spinand_get_otp(spi_nand, &otp);
262}
263#endif
264
265static int spinand_disable_ecc(struct spi_device *spi_nand)
266{
267 int retval;
268 u8 otp = 0;
269
270 retval = spinand_get_otp(spi_nand, &otp);
271 if (retval < 0)
272 return retval;
273
274 if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
275 otp &= ~OTP_ECC_MASK;
276 retval = spinand_set_otp(spi_nand, &otp);
277 if (retval < 0)
278 return retval;
279 return spinand_get_otp(spi_nand, &otp);
280 }
281 return 0;
282}
283
284
285
286
287
288
289
290
291
292
293static int spinand_write_enable(struct spi_device *spi_nand)
294{
295 struct spinand_cmd cmd = {0};
296
297 cmd.cmd = CMD_WR_ENABLE;
298 return spinand_cmd(spi_nand, &cmd);
299}
300
301static int spinand_read_page_to_cache(struct spi_device *spi_nand, u16 page_id)
302{
303 struct spinand_cmd cmd = {0};
304 u16 row;
305
306 row = page_id;
307 cmd.cmd = CMD_READ;
308 cmd.n_addr = 3;
309 cmd.addr[1] = (u8)((row & 0xff00) >> 8);
310 cmd.addr[2] = (u8)(row & 0x00ff);
311
312 return spinand_cmd(spi_nand, &cmd);
313}
314
315
316
317
318
319
320
321
322
/*
 * spinand_read_from_cache - read @len bytes out of the chip's cache
 * @spi_nand: SPI device
 * @page_id: page number; only bit 6 is used here (see below)
 * @byte_id: starting column (byte offset) inside the cache
 * @len: number of bytes to read
 * @rbuf: destination buffer
 *
 * Issues READ FROM CACHE with a 3-byte address: column high/low plus a
 * trailing 0xff byte. Bit 4 of the first address byte carries bit 6 of
 * the page id — presumably a plane-select bit for two-plane parts;
 * TODO(review): confirm against the datasheet.
 */
static int spinand_read_from_cache(struct spi_device *spi_nand, u16 page_id,
				   u16 byte_id, u16 len, u8 *rbuf)
{
	struct spinand_cmd cmd = {0};
	u16 column;

	column = byte_id;
	cmd.cmd = CMD_READ_RDM;
	cmd.n_addr = 3;
	cmd.addr[0] = (u8)((column & 0xff00) >> 8);
	cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);	/* plane select? */
	cmd.addr[1] = (u8)(column & 0x00ff);
	cmd.addr[2] = (u8)(0xff);	/* dummy address byte */
	cmd.n_dummy = 0;
	cmd.n_rx = len;
	cmd.rx_buf = rbuf;

	return spinand_cmd(spi_nand, &cmd);
}
342
343
344
345
346
347
348
349
350
351
352
353
/*
 * spinand_read_page - load page @page_id into the cache and copy
 * @len bytes starting at @offset into @rbuf
 *
 * Returns 0 on success or a negative error code from the SPI layer.
 *
 * NOTE(review): on an on-die ECC failure this logs the error and returns
 * 0 WITHOUT reading any data into @rbuf, and (when ONDIEECC is enabled)
 * leaves the ECC engine on and enable_read_hw_ecc set — callers cannot
 * distinguish that from success. The status poll below also has no
 * timeout, so a wedged chip spins forever.
 */
static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
			     u16 offset, u16 len, u8 *rbuf)
{
	int ret;
	u8 status = 0;

#ifdef CONFIG_MTD_SPINAND_ONDIEECC
	/* enable_read_hw_ecc is armed by spinand_read_page_hwecc() */
	if (enable_read_hw_ecc) {
		if (spinand_enable_ecc(spi_nand) < 0)
			dev_err(&spi_nand->dev, "enable HW ECC failed!");
	}
#endif
	ret = spinand_read_page_to_cache(spi_nand, page_id);
	if (ret < 0)
		return ret;

	if (wait_till_ready(spi_nand))
		dev_err(&spi_nand->dev, "WAIT timedout!!!\n");

	/* poll OIP until the page-load finishes, then check ECC result */
	while (1) {
		ret = spinand_read_status(spi_nand, &status);
		if (ret < 0) {
			dev_err(&spi_nand->dev,
				"err %d read status register\n", ret);
			return ret;
		}

		if ((status & STATUS_OIP_MASK) == STATUS_READY) {
			if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
				dev_err(&spi_nand->dev, "ecc error, page=%d\n",
					page_id);
				return 0;
			}
			break;
		}
	}

	ret = spinand_read_from_cache(spi_nand, page_id, offset, len, rbuf);
	if (ret < 0) {
		dev_err(&spi_nand->dev, "read from cache failed!!\n");
		return ret;
	}

#ifdef CONFIG_MTD_SPINAND_ONDIEECC
	/* one-shot flag: disable ECC again after a hwecc-requested read */
	if (enable_read_hw_ecc) {
		ret = spinand_disable_ecc(spi_nand);
		if (ret < 0) {
			dev_err(&spi_nand->dev, "disable ecc failed!!\n");
			return ret;
		}
		enable_read_hw_ecc = 0;
	}
#endif
	return ret;
}
409
410
411
412
413
414
415
416
417
418
419
420
421static int spinand_program_data_to_cache(struct spi_device *spi_nand,
422 u16 page_id, u16 byte_id,
423 u16 len, u8 *wbuf)
424{
425 struct spinand_cmd cmd = {0};
426 u16 column;
427
428 column = byte_id;
429 cmd.cmd = CMD_PROG_PAGE_CLRCACHE;
430 cmd.n_addr = 2;
431 cmd.addr[0] = (u8)((column & 0xff00) >> 8);
432 cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
433 cmd.addr[1] = (u8)(column & 0x00ff);
434 cmd.n_tx = len;
435 cmd.tx_buf = wbuf;
436
437 return spinand_cmd(spi_nand, &cmd);
438}
439
440
441
442
443
444
445
446
447
448
449static int spinand_program_execute(struct spi_device *spi_nand, u16 page_id)
450{
451 struct spinand_cmd cmd = {0};
452 u16 row;
453
454 row = page_id;
455 cmd.cmd = CMD_PROG_PAGE_EXC;
456 cmd.n_addr = 3;
457 cmd.addr[1] = (u8)((row & 0xff00) >> 8);
458 cmd.addr[2] = (u8)(row & 0x00ff);
459
460 return spinand_cmd(spi_nand, &cmd);
461}
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476static int spinand_program_page(struct spi_device *spi_nand,
477 u16 page_id, u16 offset, u16 len, u8 *buf)
478{
479 int retval;
480 u8 status = 0;
481 u8 *wbuf;
482#ifdef CONFIG_MTD_SPINAND_ONDIEECC
483 unsigned int i, j;
484
485 wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL);
486 if (!wbuf)
487 return -ENOMEM;
488
489 enable_read_hw_ecc = 0;
490 spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
491
492 for (i = offset, j = 0; i < len; i++, j++)
493 wbuf[i] &= buf[j];
494
495 if (enable_hw_ecc) {
496 retval = spinand_enable_ecc(spi_nand);
497 if (retval < 0) {
498 dev_err(&spi_nand->dev, "enable ecc failed!!\n");
499 return retval;
500 }
501 }
502#else
503 wbuf = buf;
504#endif
505 retval = spinand_write_enable(spi_nand);
506 if (retval < 0) {
507 dev_err(&spi_nand->dev, "write enable failed!!\n");
508 return retval;
509 }
510 if (wait_till_ready(spi_nand))
511 dev_err(&spi_nand->dev, "wait timedout!!!\n");
512
513 retval = spinand_program_data_to_cache(spi_nand, page_id,
514 offset, len, wbuf);
515 if (retval < 0)
516 return retval;
517 retval = spinand_program_execute(spi_nand, page_id);
518 if (retval < 0)
519 return retval;
520 while (1) {
521 retval = spinand_read_status(spi_nand, &status);
522 if (retval < 0) {
523 dev_err(&spi_nand->dev,
524 "error %d reading status register\n", retval);
525 return retval;
526 }
527
528 if ((status & STATUS_OIP_MASK) == STATUS_READY) {
529 if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL) {
530 dev_err(&spi_nand->dev,
531 "program error, page %d\n", page_id);
532 return -1;
533 }
534 break;
535 }
536 }
537#ifdef CONFIG_MTD_SPINAND_ONDIEECC
538 if (enable_hw_ecc) {
539 retval = spinand_disable_ecc(spi_nand);
540 if (retval < 0) {
541 dev_err(&spi_nand->dev, "disable ecc failed!!\n");
542 return retval;
543 }
544 enable_hw_ecc = 0;
545 }
546#endif
547
548 return 0;
549}
550
551
552
553
554
555
556
557
558
559
560static int spinand_erase_block_erase(struct spi_device *spi_nand, u16 block_id)
561{
562 struct spinand_cmd cmd = {0};
563 u16 row;
564
565 row = block_id;
566 cmd.cmd = CMD_ERASE_BLK;
567 cmd.n_addr = 3;
568 cmd.addr[1] = (u8)((row & 0xff00) >> 8);
569 cmd.addr[2] = (u8)(row & 0x00ff);
570
571 return spinand_cmd(spi_nand, &cmd);
572}
573
574
575
576
577
578
579
580
581
582
583
584
585static int spinand_erase_block(struct spi_device *spi_nand, u16 block_id)
586{
587 int retval;
588 u8 status = 0;
589
590 retval = spinand_write_enable(spi_nand);
591 if (wait_till_ready(spi_nand))
592 dev_err(&spi_nand->dev, "wait timedout!!!\n");
593
594 retval = spinand_erase_block_erase(spi_nand, block_id);
595 while (1) {
596 retval = spinand_read_status(spi_nand, &status);
597 if (retval < 0) {
598 dev_err(&spi_nand->dev,
599 "error %d reading status register\n", retval);
600 return retval;
601 }
602
603 if ((status & STATUS_OIP_MASK) == STATUS_READY) {
604 if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL) {
605 dev_err(&spi_nand->dev,
606 "erase error, block %d\n", block_id);
607 return -1;
608 }
609 break;
610 }
611 }
612 return 0;
613}
614
615#ifdef CONFIG_MTD_SPINAND_ONDIEECC
/*
 * spinand_write_page_hwecc - ecc.write_page hook for on-die ECC
 *
 * Arms the enable_hw_ecc flag (consumed by spinand_program_page() when
 * the subsequent PAGEPROG command fires) and stages the data through
 * chip->write_buf. Always returns 0.
 * NOTE(review): @oob_required is ignored — OOB data is never staged here.
 */
static int spinand_write_page_hwecc(struct mtd_info *mtd,
				    struct nand_chip *chip,
				    const u8 *buf, int oob_required,
				    int page)
{
	const u8 *p = buf;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;

	enable_hw_ecc = 1;
	chip->write_buf(mtd, p, eccsize * eccsteps);
	return 0;
}
629
/*
 * spinand_read_page_hwecc - ecc.read_page hook for on-die ECC
 *
 * Arms enable_read_hw_ecc so the low-level read (reached through
 * chip->read_buf) runs with the ECC engine on, then polls the status
 * register and folds the ECC outcome into mtd->ecc_stats.
 * Returns 0, or a negative error code if the status read fails.
 * NOTE(review): the status poll has no timeout — a wedged device loops
 * forever.
 */
static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				   u8 *buf, int oob_required, int page)
{
	int retval;
	u8 status;
	u8 *p = buf;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;
	struct spinand_info *info = nand_get_controller_data(chip);

	enable_read_hw_ecc = 1;

	chip->read_buf(mtd, p, eccsize * eccsteps);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	while (1) {
		retval = spinand_read_status(info->spi, &status);
		if (retval < 0) {
			dev_err(&mtd->dev,
				"error %d reading status register\n", retval);
			return retval;
		}

		if ((status & STATUS_OIP_MASK) == STATUS_READY) {
			if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
				pr_info("spinand: ECC error\n");
				mtd->ecc_stats.failed++;
			} else if ((status & STATUS_ECC_MASK) ==
				   STATUS_ECC_1BIT_CORRECTED)
				mtd->ecc_stats.corrected++;
			break;
		}
	}
	return 0;
}
666#endif
667
/* No-op: a single chip per SPI device; chip-select is handled by the
 * SPI core.
 */
static void spinand_select_chip(struct mtd_info *mtd, int dev)
{
}
671
672static u8 spinand_read_byte(struct mtd_info *mtd)
673{
674 struct spinand_state *state = mtd_to_state(mtd);
675 u8 data;
676
677 data = state->buf[state->buf_ptr];
678 state->buf_ptr++;
679 return data;
680}
681
/*
 * spinand_wait - nand_chip waitfunc hook: poll status until ready
 *
 * Budget is 400 ms for erase, 20 ms otherwise. Returns 0 when ready or a
 * negative error code if the status read fails.
 * NOTE(review): a timeout also returns 0, indistinguishable from
 * success — confirm whether the core tolerates this.
 */
static int spinand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct spinand_info *info = nand_get_controller_data(chip);

	unsigned long timeo = jiffies;
	int retval, state = chip->state;
	u8 status;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	while (time_before(jiffies, timeo)) {
		retval = spinand_read_status(info->spi, &status);
		if (retval < 0) {
			dev_err(&mtd->dev,
				"error %d reading status register\n", retval);
			return retval;
		}

		if ((status & STATUS_OIP_MASK) == STATUS_READY)
			return 0;

		cond_resched();
	}
	return 0;
}
710
711static void spinand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
712{
713 struct spinand_state *state = mtd_to_state(mtd);
714
715 memcpy(state->buf + state->buf_ptr, buf, len);
716 state->buf_ptr += len;
717}
718
719static void spinand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
720{
721 struct spinand_state *state = mtd_to_state(mtd);
722
723 memcpy(buf, state->buf + state->buf_ptr, len);
724 state->buf_ptr += len;
725}
726
727
728
729
730static void spinand_reset(struct spi_device *spi_nand)
731{
732 struct spinand_cmd cmd = {0};
733
734 cmd.cmd = CMD_RESET;
735
736 if (spinand_cmd(spi_nand, &cmd) < 0)
737 pr_info("spinand reset failed!\n");
738
739
740 usleep_range(1000, 2000);
741
742 if (wait_till_ready(spi_nand))
743 dev_err(&spi_nand->dev, "wait timedout!\n");
744}
745
/*
 * spinand_cmdfunc - nand_chip cmdfunc hook: translate raw NAND commands
 * into SPI NAND operations, staging data through state->buf
 * @mtd: MTD device
 * @command: NAND_CMD_* opcode
 * @column: column (byte) address, or -1 when unused
 * @page: page address, or -1 when unused
 */
static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command,
			    int column, int page)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct spinand_info *info = nand_get_controller_data(chip);
	struct spinand_state *state = info->priv;

	switch (command) {
	/*
	 * Full-page read: 0x840 = 2112 bytes, presumably 2048 data +
	 * 64 OOB — confirm for parts with other geometries.
	 */
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		state->buf_ptr = 0;
		spinand_read_page(info->spi, page, 0x0, 0x840, state->buf);
		break;
	/* OOB only: 64 bytes starting at the 2048-byte data boundary */
	case NAND_CMD_READOOB:
		state->buf_ptr = 0;
		spinand_read_page(info->spi, page, 0x800, 0x40, state->buf);
		break;
	case NAND_CMD_RNDOUT:
		state->buf_ptr = column;
		break;
	case NAND_CMD_READID:
		state->buf_ptr = 0;
		spinand_read_id(info->spi, state->buf);
		break;
	case NAND_CMD_PARAM:
		state->buf_ptr = 0;
		break;
	/* ERASE1 carries the address and does the work; ERASE2 is a no-op */
	case NAND_CMD_ERASE1:
		spinand_erase_block(info->spi, page);
		break;

	case NAND_CMD_ERASE2:
		break;
	/* SEQIN latches the target address; data arrives via write_buf */
	case NAND_CMD_SEQIN:
		state->col = column;
		state->row = page;
		state->buf_ptr = 0;
		break;
	/* PAGEPROG flushes the staged buffer to the chip */
	case NAND_CMD_PAGEPROG:
		spinand_program_page(info->spi, state->row, state->col,
				     state->buf_ptr, state->buf);
		break;
	case NAND_CMD_STATUS:
		/*
		 * NOTE(review): this reads the OTP register rather than
		 * the status register, then forces bit 7 so the core sees
		 * the device as not write-protected — confirm intent.
		 */
		spinand_get_otp(info->spi, state->buf);
		if (!(state->buf[0] & 0x80))
			state->buf[0] = 0x80;
		state->buf_ptr = 0;
		break;

	case NAND_CMD_RESET:
		if (wait_till_ready(info->spi))
			dev_err(&info->spi->dev, "WAIT timedout!!!\n");
		/* settle time before the reset opcode itself */
		usleep_range(250, 1000);
		spinand_reset(info->spi);
		break;
	default:
		dev_err(&mtd->dev, "Unknown CMD: 0x%x\n", command);
	}
}
813
814
815
816
817
818
819
820
821static int spinand_lock_block(struct spi_device *spi_nand, u8 lock)
822{
823 struct spinand_cmd cmd = {0};
824 int ret;
825 u8 otp = 0;
826
827 ret = spinand_get_otp(spi_nand, &otp);
828
829 cmd.cmd = CMD_WRITE_REG;
830 cmd.n_addr = 1;
831 cmd.addr[0] = REG_BLOCK_LOCK;
832 cmd.n_tx = 1;
833 cmd.tx_buf = &lock;
834
835 ret = spinand_cmd(spi_nand, &cmd);
836 if (ret < 0)
837 dev_err(&spi_nand->dev, "error %d lock block\n", ret);
838
839 return ret;
840}
841
842
843
844
845
846
847
848
/*
 * spinand_probe - SPI driver probe
 *
 * Allocates the spinand_info/spinand_state pair and the staging buffer,
 * unlocks all blocks, wires the nand_chip callbacks (with on-die or
 * software ECC depending on CONFIG_MTD_SPINAND_ONDIEECC), scans the chip
 * and registers the MTD. Returns 0 on success or a negative error code.
 */
static int spinand_probe(struct spi_device *spi_nand)
{
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct spinand_info *info;
	struct spinand_state *state;

	info = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->spi = spi_nand;

	/* NOTE(review): return value ignored — unlock failure is silent */
	spinand_lock_block(spi_nand, BL_ALL_UNLOCKED);

	state = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_state),
			     GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	info->priv = state;
	state->buf_ptr = 0;
	state->buf = devm_kzalloc(&spi_nand->dev, BUFSIZE, GFP_KERNEL);
	if (!state->buf)
		return -ENOMEM;

	chip = devm_kzalloc(&spi_nand->dev, sizeof(struct nand_chip),
			    GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

#ifdef CONFIG_MTD_SPINAND_ONDIEECC
	/* on-die ECC: 4 steps of 512 bytes, 6 ECC bytes each */
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 0x200;
	chip->ecc.bytes = 0x6;
	chip->ecc.steps = 0x4;

	chip->ecc.strength = 1;
	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
	chip->ecc.layout = &spinand_oob_64;
	chip->ecc.read_page = spinand_read_page_hwecc;
	chip->ecc.write_page = spinand_write_page_hwecc;
#else
	/* software ECC: make sure the on-die engine is off */
	chip->ecc.mode = NAND_ECC_SOFT;
	if (spinand_disable_ecc(spi_nand) < 0)
		dev_info(&spi_nand->dev, "%s: disable ecc failed!\n",
			 __func__);
#endif

	nand_set_flash_node(chip, spi_nand->dev.of_node);
	nand_set_controller_data(chip, info);
	chip->read_buf = spinand_read_buf;
	chip->write_buf = spinand_write_buf;
	chip->read_byte = spinand_read_byte;
	chip->cmdfunc = spinand_cmdfunc;
	chip->waitfunc = spinand_wait;
	chip->options |= NAND_CACHEPRG;
	chip->select_chip = spinand_select_chip;

	mtd = nand_to_mtd(chip);

	dev_set_drvdata(&spi_nand->dev, mtd);

	mtd->dev.parent = &spi_nand->dev;
	mtd->oobsize = 64;

	if (nand_scan(mtd, 1))
		return -ENXIO;

	return mtd_device_register(mtd, NULL, 0);
}
921
922
923
924
925
926
927
928
929static int spinand_remove(struct spi_device *spi)
930{
931 mtd_device_unregister(dev_get_drvdata(&spi->dev));
932
933 return 0;
934}
935
/* Device-tree match table */
static const struct of_device_id spinand_dt[] = {
	{ .compatible = "spinand,mt29f", },
	{}
};
MODULE_DEVICE_TABLE(of, spinand_dt);
941
942
943
944
/* SPI driver binding for the "mt29f" device */
static struct spi_driver spinand_driver = {
	.driver = {
		.name = "mt29f",
		.of_match_table = spinand_dt,
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};

module_spi_driver(spinand_driver);

MODULE_DESCRIPTION("SPI NAND driver for Micron");
MODULE_AUTHOR("Henry Pan <hspan@micron.com>, Kamlakant Patel <kamlakant.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");
959