1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/err.h>
14#include <linux/errno.h>
15#include <linux/module.h>
16#include <linux/device.h>
17#include <linux/mutex.h>
18#include <linux/math64.h>
19#include <linux/sizes.h>
20
21#include <linux/mtd/mtd.h>
22#include <linux/of_platform.h>
23#include <linux/spi/flash.h>
24#include <linux/mtd/spi-nor.h>
25#include <linux/spi/spi.h>
26
27
28
29
30
31
32
/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now.
 */
#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)

/*
 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
 * for larger flash.
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)

#define SPI_NOR_MAX_ID_LEN 6
#define SPI_NOR_MAX_ADDR_WIDTH 4
43
struct flash_info {
	char *name;

	/*
	 * This array stores the ID bytes. The first three bytes are the
	 * JEDEC ID. JEDEC ID zero means "no ID" (mostly older chips).
	 */
	u8 id[SPI_NOR_MAX_ID_LEN];
	u8 id_len;

	/*
	 * The size listed here is what works with SPINOR_OP_SE, which isn't
	 * necessarily called a "sector" by the vendor.
	 */
	unsigned sector_size;
	u16 n_sectors;

	u16 page_size;
	u16 addr_width;

	u16 flags;
#define SECT_4K BIT(0)			/* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1)		/* No erase command needed */
#define SST_WRITE BIT(2)		/* use SST byte programming */
#define SPI_NOR_NO_FR BIT(3)		/* Can't do fastread */
#define SECT_4K_PMC BIT(4)		/* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ BIT(5)	/* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ BIT(6)	/* Flash supports Quad Read */
#define USE_FSR BIT(7)			/* use flag status register */
#define SPI_NOR_HAS_LOCK BIT(8)		/* Flash supports lock/unlock via SR */
#define SPI_NOR_HAS_TB BIT(9)		/*
					 * Flash SR has Top/Bottom (TB) bit to
					 * control protected area selection.
					 */
#define SPI_NOR_QUAD_IO_READ BIT(10)	/* Flash supports Quad IO read */
#define SST_GLOBAL_PROT_UNLK BIT(11)	/* Unlock the Global protection for sst flashes */
};
83
/* First ID byte is the manufacturer's JEDEC ID. */
#define JEDEC_MFR(info) ((info)->id[0])

static const struct flash_info *spi_nor_match_id(const char *name);
87
88
89
90
91
92
93static int read_sr(struct spi_nor *nor)
94{
95 int ret;
96 u8 val[2];
97
98 if (nor->isparallel) {
99 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val[0], 2);
100 if (ret < 0) {
101 pr_err("error %d reading SR\n", (int) ret);
102 return ret;
103 }
104 val[0] &= val[1];
105 } else {
106 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val[0], 1);
107 if (ret < 0) {
108 pr_err("error %d reading SR\n", (int) ret);
109 return ret;
110 }
111 }
112
113 return val[0];
114}
115
116
117
118
119
120
121static int read_fsr(struct spi_nor *nor)
122{
123 int ret;
124 u8 val[2];
125
126 if (nor->isparallel) {
127 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val[0], 2);
128 if (ret < 0) {
129 pr_err("error %d reading FSR\n", ret);
130 return ret;
131 }
132 val[0] &= val[1];
133 } else {
134 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val[0], 1);
135 if (ret < 0) {
136 pr_err("error %d reading FSR\n", ret);
137 return ret;
138 }
139 }
140
141 return val[0];
142}
143
144
145
146
147
148
149static int read_cr(struct spi_nor *nor)
150{
151 int ret;
152 u8 val;
153
154 ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
155 if (ret < 0) {
156 dev_err(nor->dev, "error %d reading CR\n", ret);
157 return ret;
158 }
159
160 return val;
161}
162
163
164
165
166
167
168static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
169{
170 switch (nor->flash_read) {
171 case SPI_NOR_FAST:
172 case SPI_NOR_DUAL:
173 case SPI_NOR_QUAD:
174 return 8;
175 case SPI_NOR_QUAD_IO:
176 return 40;
177 case SPI_NOR_NORMAL:
178 return 0;
179 }
180 return 0;
181}
182
183
184
185
186
/*
 * Write status register 1 byte.
 * Returns negative if error occurred.
 */
static inline int write_sr(struct spi_nor *nor, u8 val)
{
	nor->cmd_buf[0] = val;
	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
}
192
193
194
195
196
197
198
/*
 * Write status register and configuration register with 2 bytes in a single
 * WRSR command: SR in the low byte, CR in the high byte (Spansion layout).
 * Returns negative if error occurred.
 */
static int write_sr_cr(struct spi_nor *nor, u16 val)
{
	nor->cmd_buf[0] = val & 0xff;
	nor->cmd_buf[1] = (val >> 8);

	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2);
}
206
207
208
209
210
/*
 * Set write enable latch with Write Enable command.
 * Returns negative if error occurred.
 */
static inline int write_enable(struct spi_nor *nor)
{
	return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
}
215
216
217
218
/*
 * Send write disable instruction to the chip.
 */
static inline int write_disable(struct spi_nor *nor)
{
	return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
}
223
/* Recover the spi_nor back-pointer stashed in mtd->priv at setup time. */
static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
	return mtd->priv;
}
228
229
/* Enable/disable 4-byte addressing mode. */
static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
			    int enable)
{
	int status;
	bool need_wren = false;
	u8 cmd;

	switch (JEDEC_MFR(info)) {
	case SNOR_MFR_MICRON:
		/* Some Micron need WREN command; all will accept it */
		need_wren = true;
		/* fall through */
	case SNOR_MFR_MACRONIX:
	case SNOR_MFR_WINBOND:
		if (need_wren)
			write_enable(nor);

		cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
		status = nor->write_reg(nor, cmd, NULL, 0);
		if (need_wren)
			write_disable(nor);

		return status;
	default:
		/* Spansion style: write the extended/bank address register. */
		nor->cmd_buf[0] = enable << 7;
		return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
	}
}
258
259
260
261
262
263
264
265
266
267static int read_ear(struct spi_nor *nor, struct flash_info *info)
268{
269 int ret;
270 u8 val;
271 u8 code;
272
273
274 if (JEDEC_MFR(info) == CFI_MFR_AMD)
275 code = SPINOR_OP_BRRD;
276
277 else if (JEDEC_MFR(info) == CFI_MFR_ST)
278 code = SPINOR_OP_RDEAR;
279 else
280 return -EINVAL;
281
282 ret = nor->read_reg(nor, code, &val, 1);
283 if (ret < 0)
284 return ret;
285
286 return val;
287}
288
289static inline int spi_nor_sr_ready(struct spi_nor *nor)
290{
291 int sr = read_sr(nor);
292 if (sr < 0)
293 return sr;
294 else
295 return !(sr & SR_WIP);
296}
297
298static inline int spi_nor_fsr_ready(struct spi_nor *nor)
299{
300 int fsr = read_fsr(nor);
301 if (fsr < 0)
302 return fsr;
303 else
304 return fsr & FSR_READY;
305}
306
307static int spi_nor_ready(struct spi_nor *nor)
308{
309 int sr, fsr;
310 sr = spi_nor_sr_ready(nor);
311 if (sr < 0)
312 return sr;
313 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
314 if (fsr < 0)
315 return fsr;
316 return sr && fsr;
317}
318
319
320
321
322
323static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
324 unsigned long timeout_jiffies)
325{
326 unsigned long deadline;
327 int timeout = 0, ret;
328
329 deadline = jiffies + timeout_jiffies;
330
331 while (!timeout) {
332 if (time_after_eq(jiffies, deadline))
333 timeout = 1;
334
335 ret = spi_nor_ready(nor);
336 if (ret < 0)
337 return ret;
338 if (ret)
339 return 0;
340
341 cond_resched();
342 }
343
344 dev_err(nor->dev, "flash operation timed out\n");
345
346 return -ETIMEDOUT;
347}
348
/* Wait for the flash to become ready using the default (non-chip-erase) timeout. */
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}
354
355
356
357
358
359static int write_ear(struct spi_nor *nor, u32 addr)
360{
361 u8 code;
362 u8 ear;
363 int ret;
364 struct mtd_info *mtd = &nor->mtd;
365
366
367 if (spi_nor_wait_till_ready(nor))
368 return 1;
369
370 if (mtd->size <= (0x1000000) << nor->shift)
371 return 0;
372
373 addr = addr % (u32) mtd->size;
374 ear = addr >> 24;
375
376 if ((!nor->isstacked) && (ear == nor->curbank))
377 return 0;
378
379 if (nor->isstacked && (mtd->size <= 0x2000000))
380 return 0;
381
382 if (nor->jedec_id == CFI_MFR_AMD)
383 code = SPINOR_OP_BRWR;
384 if (nor->jedec_id == CFI_MFR_ST) {
385 write_enable(nor);
386 code = SPINOR_OP_WREAR;
387 }
388 nor->cmd_buf[0] = ear;
389
390 ret = nor->write_reg(nor, code, nor->cmd_buf, 1);
391 if (ret < 0)
392 return ret;
393
394 nor->curbank = ear;
395
396 return 0;
397}
398
399
400
401
402
403
/*
 * Erase the whole flash memory.
 *
 * For stacked configurations the erase is issued to the lower die first, then
 * (after it completes) to the upper die by toggling SPI_MASTER_U_PAGE.
 *
 * Returns 0 if successful, non-zero otherwise.
 */
static int erase_chip(struct spi_nor *nor)
{
	int ret;
	struct mtd_info *mtd = &nor->mtd;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(mtd->size >> 10));

	/* Select the lower die of a stacked pair first. */
	if (nor->isstacked)
		nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;

	write_enable(nor);

	ret = nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
	if (ret)
		return ret;

	if (nor->isstacked) {
		/* Wait until the lower die's erase finishes. */
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			return ret;

		/* Switch to the upper die and erase it too. */
		nor->spi->master->flags |= SPI_MASTER_U_PAGE;

		/* WREN is cleared by the previous erase; re-enable. */
		write_enable(nor);

		ret = nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
	}

	return ret;
}
442
443static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
444{
445 int ret = 0;
446
447 mutex_lock(&nor->lock);
448
449 if (nor->prepare) {
450 ret = nor->prepare(nor, ops);
451 if (ret) {
452 dev_err(nor->dev, "failed in the preparation.\n");
453 mutex_unlock(&nor->lock);
454 return ret;
455 }
456 }
457 return ret;
458}
459
/* Run the controller's optional unprepare hook and release the spi_nor mutex. */
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	if (nor->unprepare)
		nor->unprepare(nor, ops);
	mutex_unlock(&nor->lock);
}
466
467
468
469
470static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
471{
472 u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
473 int i;
474
475 if (nor->erase)
476 return nor->erase(nor, addr);
477
478
479
480
481
482 for (i = nor->addr_width - 1; i >= 0; i--) {
483 buf[i] = addr & 0xff;
484 addr >>= 8;
485 }
486
487 return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
488}
489
490
491
492
493
/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len, offset;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Length must be a whole number of erase sectors. */
	div_u64_rem(instr->len, mtd->erasesize, &rem);
	if (rem)
		return -EINVAL;

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size) {
		unsigned long timeout;

		write_enable(nor);

		if (erase_chip(nor)) {
			ret = -EIO;
			goto erase_err;
		}

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* "sector"-at-a-time erase */
	} else {
		while (len) {
			offset = addr;
			/* Parallel mode: each die holds half of every address. */
			if (nor->isparallel == 1)
				offset /= 2;
			/* Stacked mode: upper half of the range lives on die 2. */
			if (nor->isstacked == 1) {
				if (offset >= (mtd->size / 2)) {
					offset = offset - (mtd->size / 2);
					nor->spi->master->flags |=
						SPI_MASTER_U_PAGE;
				} else
					nor->spi->master->flags &=
						~SPI_MASTER_U_PAGE;
			}

			/* Wait until finished previous write command. */
			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			if (nor->addr_width == 3) {
				/* Update Extended Address Register */
				ret = write_ear(nor, offset);
				if (ret)
					goto erase_err;
			}

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			write_enable(nor);

			ret = spi_nor_erase_sector(nor, offset);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}
	}

	write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);

	instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return ret;
}
601
/*
 * Smallest number of sectors a single block-protect step can cover.
 * Micron (CFI_MFR_ST) parts can lock at single-sector granularity; others
 * are limited by M25P_MAX_LOCKABLE_SECTORS protection steps.
 */
static inline uint16_t min_lockable_sectors(struct spi_nor *nor,
					    uint16_t n_sectors)
{
	uint16_t lock_granularity;

	/*
	 * Revisit - SST (not used by us) has the same JEDEC ID as Micron but
	 * a protected-area table similar to that of Spansion.
	 * TODO(review): confirm against the relevant datasheets.
	 */
	lock_granularity = max(1, n_sectors/M25P_MAX_LOCKABLE_SECTORS);
	if (nor->jedec_id == CFI_MFR_ST)
		lock_granularity = 1;

	return lock_granularity;
}
617
/*
 * Compute the start offset (within one die) of the area protected by
 * @lock_bits block-protection bits. Protection always covers the top of the
 * address space, so the start is the die size minus the protected length.
 */
static inline uint32_t get_protected_area_start(struct spi_nor *nor,
						uint8_t lock_bits)
{
	u16 n_sectors;
	u32 sector_size;
	uint64_t mtd_size;
	struct mtd_info *mtd = &nor->mtd;

	n_sectors = nor->n_sectors;
	sector_size = nor->sector_size;
	mtd_size = mtd->size;

	/* In parallel mode each die sees half the sector size and total size. */
	if (nor->isparallel) {
		sector_size = (nor->sector_size >> 1);
		mtd_size = (mtd->size >> 1);
	}
	/* In stacked mode each die holds half the sectors and total size. */
	if (nor->isstacked) {
		n_sectors = (nor->n_sectors >> 1);
		mtd_size = (mtd->size >> 1);
	}

	/* Each lock_bits step doubles the protected length (1 << (bits-1)). */
	return mtd_size - (1<<(lock_bits-1)) *
		min_lockable_sectors(nor, n_sectors) * sector_size;
}
642
/*
 * Find the smallest block-protection level whose protected area starts at or
 * below @offset (i.e. the minimum protection that still covers @offset).
 */
static uint8_t min_protected_area_including_offset(struct spi_nor *nor,
						   uint32_t offset)
{
	uint8_t lock_bits, lockbits_limit;

	/*
	 * BP0-BP2 give up to 7 protection levels; Micron (CFI_MFR_ST) parts
	 * add BP3 for up to 15 levels.
	 * NOTE(review): the loop bound is exclusive (`< lockbits_limit`), so
	 * the maximum value returned by the loop body is limit-1 unless the
	 * break never fires — confirm this matches the intended level range.
	 */
	lockbits_limit = 7;
	if (nor->jedec_id == CFI_MFR_ST)
		lockbits_limit = 15;

	for (lock_bits = 1; lock_bits < lockbits_limit; lock_bits++) {
		if (offset >= get_protected_area_start(nor, lock_bits))
			break;
	}
	return lock_bits;
}
663
/*
 * Rewrite the status register with the block-protection field set to
 * @lock_bits, preserving the other bits of @status.
 *
 * Returns 0 on success, 1 on write failure (callers treat nonzero as error).
 */
static int write_sr_modify_protection(struct spi_nor *nor, uint8_t status,
				      uint8_t lock_bits)
{
	uint8_t status_new, bp_mask;
	u16 val;

	/* Clear the current BP bits, then build the requested BP field. */
	status_new = status & ~SR_BP_BIT_MASK;
	bp_mask = (lock_bits << SR_BP_BIT_OFFSET) & SR_BP_BIT_MASK;

	/* Micron (CFI_MFR_ST) parts have an extra BP3 and a top/bottom bit. */
	if (nor->jedec_id == CFI_MFR_ST) {
		status_new &= ~SR_BP3;

		/* Protected area grows from the top of the flash. */
		status_new &= ~SR_BP_TB;

		/* Levels above 7 need BP3 set. */
		if (lock_bits > 7)
			bp_mask |= SR_BP3;
	}

	status_new |= bp_mask;

	write_enable(nor);

	/*
	 * Spansion (CFI_MFR_AMD) parts must write SR and CR together so the
	 * configuration register (e.g. quad-enable) is not clobbered.
	 */
	if (nor->jedec_id == CFI_MFR_AMD) {
		val = read_cr(nor) << 8;
		val |= status_new;
		if (write_sr_cr(nor, val) < 0)
			return 1;
	} else {
		if (write_sr(nor, status_new) < 0)
			return 1;
	}
	return 0;
}
701
702static uint8_t bp_bits_from_sr(struct spi_nor *nor, uint8_t status)
703{
704 uint8_t ret;
705
706 ret = (((status) & SR_BP_BIT_MASK) >> SR_BP_BIT_OFFSET);
707 if (nor->jedec_id == 0x20)
708 ret |= ((status & SR_BP3) >> (SR_BP_BIT_OFFSET + 1));
709
710 return ret;
711}
712
713static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
714 uint64_t *len)
715{
716 struct mtd_info *mtd = &nor->mtd;
717 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
718 int shift = ffs(mask) - 1;
719 int pow;
720
721 if (!(sr & mask)) {
722
723 *ofs = 0;
724 *len = 0;
725 } else {
726 pow = ((sr & mask) ^ mask) >> shift;
727 *len = mtd->size >> pow;
728 if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
729 *ofs = 0;
730 else
731 *ofs = mtd->size - *len;
732 }
733}
734
735
736
737
738
739static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
740 u8 sr, bool locked)
741{
742 loff_t lock_offs;
743 uint64_t lock_len;
744
745 if (!len)
746 return 1;
747
748 stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
749
750 if (locked)
751
752 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
753 else
754
755 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
756}
757
/* True if [ofs, ofs+len) is fully locked per status register @sr. */
static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			    u8 sr)
{
	return stm_check_lock_status_sr(nor, ofs, len, sr, true);
}
763
/* True if [ofs, ofs+len) is fully unlocked per status register @sr. */
static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			      u8 sr)
{
	return stm_check_lock_status_sr(nor, ofs, len, sr, false);
}
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
/*
 * Lock a region of the flash using the ST-Micro-style BP0-BP2 (and optional
 * TB) status-register bits. The hardware can only protect one contiguous
 * power-of-two-sized region anchored at the top or bottom of the flash, so
 * this extends whichever existing protection already covers everything
 * beyond the requested range.
 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;
	int ret;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is unlocked, we don't need to do anything */
	if (stm_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection */
	if (!stm_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection */
	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
			      status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	/*
	 * Need smallest pow such that:
	 *
	 *   1 / (2^pow) <= (len / size)
	 *
	 * so: pow = log2(size) - log2(lock_len)
	 */
	pow = ilog2(mtd->size) - ilog2(lock_len);
	val = mask - (pow << shift);
	if (val & ~mask)
		return -EINVAL;

	/* Don't "lock" with no region! */
	if (!(val & mask))
		return -EINVAL;

	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Disallow further writes if WP pin is asserted */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	write_enable(nor);
	ret = write_sr(nor, status_new);
	if (ret)
		return ret;
	return spi_nor_wait_till_ready(nor);
}
881
882
883
884
885
886
/*
 * Unlock a region of the flash — the inverse of stm_lock(): shrink the
 * existing top- or bottom-anchored protected region so it no longer covers
 * [ofs, ofs+len), without unlocking anything outside that range.
 */
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;
	int ret;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is locked, we don't need to do anything */
	if (stm_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection */
	if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection */
	if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should remain locked */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	/*
	 * Need largest pow such that:
	 *
	 *   1 / (2^pow) >= (len / size)
	 *
	 * NOTE(review): pow is computed before the lock_len == 0 check, so
	 * order_base_2(0) is evaluated in that case; its result is unused
	 * because val is then forced to 0 — confirm this is benign.
	 */
	pow = ilog2(mtd->size) - order_base_2(lock_len);
	if (lock_len == 0) {
		val = 0;	/* fully unlocked */
	} else {
		val = mask - (pow << shift);
		/* Some power-of-two sizes are not supported */
		if (val & ~mask)
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Don't protect status register if we're fully unlocked */
	if (lock_len == mtd->size)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	write_enable(nor);
	ret = write_sr(nor, status_new);
	if (ret)
		return ret;
	return spi_nor_wait_till_ready(nor);
}
969
970
971
972
973
974
975
976
977static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
978{
979 int status;
980
981 status = read_sr(nor);
982 if (status < 0)
983 return status;
984
985 return stm_is_locked_sr(nor, ofs, len, status);
986}
987
988static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
989{
990 struct spi_nor *nor = mtd_to_spi_nor(mtd);
991 uint32_t offset = ofs;
992 uint8_t status;
993 uint8_t lock_bits;
994 int ret = 0;
995
996 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
997 if (ret)
998 return ret;
999
1000 if (nor->isparallel == 1)
1001 offset /= 2;
1002
1003 if (nor->isstacked == 1) {
1004 if (offset >= (mtd->size / 2)) {
1005 offset = offset - (mtd->size / 2);
1006 nor->spi->master->flags |= SPI_MASTER_U_PAGE;
1007 } else
1008 nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
1009 }
1010
1011
1012 ret = spi_nor_wait_till_ready(nor);
1013 if (ret)
1014 goto err;
1015
1016 status = read_sr(nor);
1017
1018 lock_bits = min_protected_area_including_offset(nor, offset);
1019
1020
1021 if (lock_bits > bp_bits_from_sr(nor, status))
1022 ret = write_sr_modify_protection(nor, status, lock_bits);
1023 else
1024 dev_err(nor->dev, "trying to unlock already locked area\n");
1025
1026err:
1027 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1028 return ret;
1029}
1030
1031static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1032{
1033 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1034 uint32_t offset = ofs;
1035 uint8_t status;
1036 uint8_t lock_bits;
1037 int ret = 0;
1038
1039 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1040 if (ret)
1041 return ret;
1042
1043 if (nor->isparallel == 1)
1044 offset /= 2;
1045
1046 if (nor->isstacked == 1) {
1047 if (offset >= (mtd->size / 2)) {
1048 offset = offset - (mtd->size / 2);
1049 nor->spi->master->flags |= SPI_MASTER_U_PAGE;
1050 } else
1051 nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
1052 }
1053
1054
1055 ret = spi_nor_wait_till_ready(nor);
1056 if (ret)
1057 goto err;
1058
1059 status = read_sr(nor);
1060
1061 lock_bits = min_protected_area_including_offset(nor, offset+len) - 1;
1062
1063
1064 if (lock_bits < bp_bits_from_sr(nor, status))
1065 ret = write_sr_modify_protection(nor, status, lock_bits);
1066 else
1067 dev_err(nor->dev, "trying to lock already unlocked area\n");
1068
1069err:
1070 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
1071 return ret;
1072}
1073
1074static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1075{
1076 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1077 uint32_t offset = ofs;
1078 uint32_t protected_area_start;
1079 uint8_t status;
1080 int ret;
1081
1082 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1083 if (ret)
1084 return ret;
1085
1086 ret = spi_nor_wait_till_ready(nor);
1087 if (ret)
1088 goto err;
1089 status = read_sr(nor);
1090
1091 protected_area_start = get_protected_area_start(nor,
1092 bp_bits_from_sr(nor, status));
1093 if (offset >= protected_area_start)
1094 ret = MTD_IS_LOCKED;
1095 else if (offset+len < protected_area_start)
1096 ret = MTD_IS_UNLOCKED;
1097 else
1098 ret = MTD_IS_PARTIALLY_LOCKED;
1099
1100err:
1101 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
1102 return ret;
1103}
1104
1105
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff, \
		((_ext_id) >> 8) & 0xff, \
		(_ext_id) & 0xff, \
	}, \
	.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = 256, \
	.flags = (_flags),

/* Used when the "_ext_id" is three bytes (JEDEC ID + 3-byte extension). */
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff, \
		((_ext_id) >> 16) & 0xff, \
		((_ext_id) >> 8) & 0xff, \
		(_ext_id) & 0xff, \
	}, \
	.id_len = 6, \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = 256, \
	.flags = (_flags),

/* For Catalyst-style serial EEPROMs with no JEDEC ID. */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = (_page_size), \
	.addr_width = (_addr_width), \
	.flags = (_flags),
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
/*
 * NOTE: double check command sets and memory organization when you add more
 * flash chips. This table is machine-searchable from the web site:
 * keep it in this format ... { "name", INFO(...) },
 */
static const struct flash_info spi_nor_ids[] = {
	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
	{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
	{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },

	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
	{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },

	{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
	{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },

	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },

	/* EON -- en25 xxx */
	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
	{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
	{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
	{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },

	/* ESMT */
	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },

	/* Everspin MRAM */
	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Fujitsu FRAM */
	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },

	/* GigaDevice */
	{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
	{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
	{ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },

	/* Intel/Numonyx -- xxxs33b */
	{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
	{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
	{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },

	/* ISSI */
	{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },

	/* Macronix */
	{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
	{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
	{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
	{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
	{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
	{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
	{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
	{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
	{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },

	/* Micron */
	{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
	{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
	{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
	{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
	{ "n25q256a", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
	{ "n25q256a13", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
	{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
	{ "n25q512a13", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
	{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
	{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
	{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },

	/* PMC */
	{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
	{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
	{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },

	/*
	 * Spansion -- single (large) sector size only, at least
	 * for the chips listed here (without boot sectors).
	 */
	{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
	{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, SPI_NOR_HAS_LOCK) },
	{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, SPI_NOR_HAS_LOCK) },
	{ "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
	{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
	{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
	{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
	{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
	{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
	{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
	{ "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{ "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
	{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
	{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },

	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
	{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
	{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
	{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
	{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
	{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst26wf016B", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
			      SST_GLOBAL_PROT_UNLK) },

	/* ST Microelectronics -- newer production may have feature updates */
	{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
	{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
	{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
	{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
	{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
	{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
	{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
	{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
	{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },

	{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
	{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
	{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
	{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
	{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
	{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
	{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
	{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },

	{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
	{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
	{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },

	{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
	{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
	{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },

	{ "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
	{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
	{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },

	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
	{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
	{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
	{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
	{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
	{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
	{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
	{
		"w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{
		"w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* Catalyst / On Semiconductor -- non-JEDEC */
	{ "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* ISSI is25lp -- quad I/O capable */
	{ "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_QUAD_IO_READ) },
	{ "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_QUAD_IO_READ) },
	{ "is25lp128", INFO(0x9D6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_QUAD_IO_READ) },
	{ },
};
1365
1366static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
1367{
1368 int tmp;
1369 u8 id[SPI_NOR_MAX_ID_LEN];
1370 const struct flash_info *info;
1371 nor->spi->master->flags &= ~(SPI_BOTH_FLASH | SPI_DATA_STRIPE);
1372
1373
1374 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
1375 if (tmp < 0) {
1376 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
1377 return ERR_PTR(tmp);
1378 }
1379
1380 for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
1381 info = &spi_nor_ids[tmp];
1382 if (info->id_len) {
1383 if (!memcmp(info->id, id, info->id_len))
1384 return &spi_nor_ids[tmp];
1385 }
1386 }
1387 dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
1388 id[0], id[1], id[2]);
1389 return ERR_PTR(-ENODEV);
1390}
1391
1392static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
1393 size_t *retlen, u_char *buf)
1394{
1395 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1396 int ret;
1397
1398 ret = nor->read(nor, from, len, retlen, buf);
1399
1400 return ret;
1401}
1402
1403static int spi_nor_read_ext(struct mtd_info *mtd, loff_t from, size_t len,
1404 size_t *retlen, u_char *buf)
1405{
1406 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1407 u32 addr = from;
1408 u32 offset = from;
1409 u32 read_len = 0;
1410 size_t actual_len = 0;
1411 u32 read_count = 0;
1412 u32 rem_bank_len = 0;
1413 u8 bank = 0;
1414 u8 stack_shift = 0;
1415 int ret;
1416
1417#define OFFSET_16_MB 0x1000000
1418
1419 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
1420
1421 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
1422 if (ret)
1423 return ret;
1424
1425 while (len) {
1426 if (nor->addr_width == 3) {
1427 bank = addr / (OFFSET_16_MB << nor->shift);
1428 rem_bank_len = ((OFFSET_16_MB << nor->shift) *
1429 (bank + 1)) - addr;
1430 }
1431 offset = addr;
1432 if (nor->isparallel == 1)
1433 offset /= 2;
1434 if (nor->isstacked == 1) {
1435 stack_shift = 1;
1436 if (offset >= (mtd->size / 2)) {
1437 offset = offset - (mtd->size / 2);
1438 nor->spi->master->flags |= SPI_MASTER_U_PAGE;
1439 } else {
1440 nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
1441 }
1442 }
1443
1444 if (nor->addr_width == 4) {
1445 rem_bank_len = (mtd->size >> stack_shift) -
1446 (offset << nor->shift);
1447 }
1448 if (nor->addr_width == 3)
1449 write_ear(nor, offset);
1450 if (len < rem_bank_len)
1451 read_len = len;
1452 else
1453 read_len = rem_bank_len;
1454
1455
1456 ret = spi_nor_wait_till_ready(nor);
1457 if (ret)
1458 goto read_err;
1459
1460 ret = spi_nor_read(mtd, offset, read_len, &actual_len, buf);
1461 if (ret)
1462 return ret;
1463
1464 addr += actual_len;
1465 len -= actual_len;
1466 buf += actual_len;
1467 read_count += actual_len;
1468 }
1469
1470 *retlen = read_count;
1471
1472read_err:
1473 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
1474 return ret;
1475}
1476
/*
 * sst_write() - mtd->_write handler for SST flashes (SST_WRITE flag).
 *
 * SST parts program via the Auto-Address-Increment (AAI) word-program
 * command, which moves two bytes per cycle and must be bracketed by
 * single Byte-Program (BP) writes when the start address or length is
 * odd. The command ordering below is mandated by the AAI protocol and
 * must not be reordered.
 *
 * Returns 0 on success, -errno on timeout/failure. NOTE(review): the
 * nor->write() calls' return values are not checked here; *retlen is
 * presumably accumulated by the controller driver — verify against the
 * nor->write implementation.
 */
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		     size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t actual;
	int ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	write_enable(nor);

	nor->sst_write_second = false;

	/* If the start address is odd, lead in with one Byte-Program. */
	actual = to % 2;
	if (actual) {
		nor->program_opcode = SPINOR_OP_BP;

		/* write one byte */
		nor->write(nor, to, 1, retlen, buf);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto time_out;
	}
	to += actual;

	/* Bulk of the data: AAI word-program, two bytes per iteration. */
	for (; actual < len - 1; actual += 2) {
		nor->program_opcode = SPINOR_OP_AAI_WP;

		/* write two bytes */
		nor->write(nor, to, 2, retlen, buf + actual);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto time_out;
		to += 2;
		/* After the first AAI cycle the address is auto-incremented. */
		nor->sst_write_second = true;
	}
	nor->sst_write_second = false;

	/* Terminate the AAI sequence before any non-AAI command. */
	write_disable(nor);
	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		goto time_out;

	/* Trailing odd byte, if any, goes out as a final Byte-Program. */
	if (actual != len) {
		write_enable(nor);

		nor->program_opcode = SPINOR_OP_BP;
		nor->write(nor, to, 1, retlen, buf + actual);

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto time_out;
		write_disable(nor);
	}
time_out:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
1542
1543
1544
1545
1546
1547
/*
 * spi_nor_write() - program an arbitrary range, split into page-sized chunks.
 *
 * Page-program commands wrap at page boundaries, so the first chunk is
 * trimmed to the end of the starting page and the remainder goes out in
 * nor->page_size pieces, each preceded by a ready-wait and write-enable.
 *
 * The device address is shifted right by nor->shift — in parallel (striped)
 * dual-flash mode each chip sees half the logical address. NOTE(review):
 * nor->write() return values are ignored and *retlen is owned by the
 * controller driver; confirm it accumulates across chunks.
 *
 * Returns 0 on success, -errno if a ready-wait times out.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 page_offset, page_size, i;
	int ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		return ret;

	write_enable(nor);

	/* Offset within the destination page (page_size is a power of two). */
	page_offset = to & (nor->page_size - 1);

	/* Do all the bytes fit onto one page? */
	if (page_offset + len <= nor->page_size) {
		nor->write(nor, to >> nor->shift, len, retlen, buf);
	} else {
		/* The size of data remaining on the first page. */
		page_size = nor->page_size - page_offset;
		nor->write(nor, to >> nor->shift, page_size, retlen, buf);

		/* Write everything else in nor->page_size chunks. */
		for (i = page_size; i < len; i += page_size) {
			page_size = len - i;
			if (page_size > nor->page_size)
				page_size = nor->page_size;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				return ret;
			/* WEL is cleared after every program cycle. */
			write_enable(nor);

			nor->write(nor, (to + i) >> nor->shift, page_size,
				retlen, buf + i);
		}
	}

	return 0;
}
1591
/*
 * spi_nor_write_ext() - mtd->_write handler with bank/stacked support.
 *
 * Splits the transfer at 16 MiB bank boundaries for 3-byte-address
 * flashes (updating the extended address register per bank) and, for
 * stacked dual-die parts, rebases the address onto the upper die and
 * selects it via SPI_MASTER_U_PAGE. Each chunk is delegated to
 * spi_nor_write(), which handles page splitting and (for parallel mode)
 * the nor->shift address scaling.
 *
 * Returns 0 on success, -errno otherwise. *retlen is updated only on
 * the success path.
 */
static int spi_nor_write_ext(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr = to;
	u32 offset = to;
	u32 write_len = 0;
	size_t actual_len = 0;
	u32 write_count = 0;
	u32 rem_bank_len = 0;
	u8 bank = 0;
	u8 stack_shift = 0;
	int ret;

#define OFFSET_16_MB 0x1000000

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	while (len) {
		/* nor->write accumulates into actual_len; reset per chunk. */
		actual_len = 0;
		if (nor->addr_width == 3) {
			/* Bytes remaining in the current 16 MiB bank. */
			bank = addr / (OFFSET_16_MB << nor->shift);
			rem_bank_len = ((OFFSET_16_MB << nor->shift) *
					(bank + 1)) - addr;
		}
		offset = addr;

		if (nor->isstacked == 1) {
			stack_shift = 1;
			if (offset >= (mtd->size / 2)) {
				/* Upper die: rebase address and select it. */
				offset = offset - (mtd->size / 2);
				nor->spi->master->flags |= SPI_MASTER_U_PAGE;
			} else {
				nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
			}
		}

		if (nor->addr_width == 4)
			rem_bank_len = (mtd->size >> stack_shift) - offset;
		if (nor->addr_width == 3)
			write_ear(nor, (offset >> nor->shift));
		if (len < rem_bank_len)
			write_len = len;
		else
			write_len = rem_bank_len;

		ret = spi_nor_write(mtd, offset, write_len, &actual_len, buf);
		if (ret)
			goto write_err;

		addr += actual_len;
		len -= actual_len;
		buf += actual_len;
		write_count += actual_len;
	}

	*retlen = write_count;

write_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
1658
1659static int macronix_quad_enable(struct spi_nor *nor)
1660{
1661 int ret, val;
1662
1663 val = read_sr(nor);
1664 if (val < 0)
1665 return val;
1666 write_enable(nor);
1667
1668 write_sr(nor, val | SR_QUAD_EN_MX);
1669
1670 if (spi_nor_wait_till_ready(nor))
1671 return 1;
1672
1673 ret = read_sr(nor);
1674 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1675 dev_err(nor->dev, "Macronix Quad bit not set\n");
1676 return -EINVAL;
1677 }
1678
1679 return 0;
1680}
1681
1682static int spansion_quad_enable(struct spi_nor *nor)
1683{
1684 int ret;
1685 int quad_en = CR_QUAD_EN_SPAN << 8;
1686
1687 quad_en |= read_sr(nor);
1688 quad_en |= (read_cr(nor) << 8);
1689
1690 write_enable(nor);
1691
1692 ret = write_sr_cr(nor, quad_en);
1693 if (ret < 0) {
1694 dev_err(nor->dev,
1695 "error while writing configuration register\n");
1696 return -EINVAL;
1697 }
1698
1699
1700 ret = read_cr(nor);
1701 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1702 dev_err(nor->dev, "Spansion Quad bit not set\n");
1703 return -EINVAL;
1704 }
1705
1706 return 0;
1707}
1708
1709static int micron_quad_enable(struct spi_nor *nor)
1710{
1711 int ret;
1712 u8 val;
1713
1714 ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
1715 if (ret < 0) {
1716 dev_err(nor->dev, "error %d reading EVCR\n", ret);
1717 return ret;
1718 }
1719
1720 write_enable(nor);
1721
1722
1723 nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
1724 ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
1725 if (ret < 0) {
1726 dev_err(nor->dev, "error while writing EVCR register\n");
1727 return ret;
1728 }
1729
1730 ret = spi_nor_wait_till_ready(nor);
1731 if (ret)
1732 return ret;
1733
1734
1735 ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
1736 if (ret < 0) {
1737 dev_err(nor->dev, "error %d reading EVCR\n", ret);
1738 return ret;
1739 }
1740 if (val & EVCR_QUAD_EN_MICRON) {
1741 dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
1742 return -EINVAL;
1743 }
1744
1745 return 0;
1746}
1747
1748static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
1749{
1750 int status;
1751
1752 switch (JEDEC_MFR(info)) {
1753 case CFI_MFR_ISSI:
1754 case SNOR_MFR_MACRONIX:
1755 status = macronix_quad_enable(nor);
1756 if (status) {
1757 dev_err(nor->dev, "Macronix quad-read not enabled\n");
1758 return -EINVAL;
1759 }
1760 return status;
1761 case SNOR_MFR_MICRON:
1762 if (!(nor->spi->mode & SPI_TX_QUAD)) {
1763 dev_info(nor->dev, "Controller not in SPI_TX_QUAD mode, just use extended SPI mode\n");
1764 return 0;
1765 }
1766 status = micron_quad_enable(nor);
1767 if (status) {
1768 dev_err(nor->dev, "Micron quad-read not enabled\n");
1769 return -EINVAL;
1770 }
1771 return status;
1772 case SNOR_MFR_SPANSION:
1773 return 0;
1774 default:
1775 status = spansion_quad_enable(nor);
1776 if (status) {
1777 dev_err(nor->dev, "Spansion quad-read not enabled\n");
1778 return -EINVAL;
1779 }
1780 return status;
1781 }
1782}
1783
1784static int spi_nor_check(struct spi_nor *nor)
1785{
1786 if (!nor->dev || !nor->read || !nor->write ||
1787 !nor->read_reg || !nor->write_reg) {
1788 pr_err("spi-nor: please fill all the necessary fields!\n");
1789 return -EINVAL;
1790 }
1791
1792 return 0;
1793}
1794
/*
 * spi_nor_scan() - identify the flash and configure the spi_nor/mtd state.
 *
 * @nor:  spi_nor structure with the controller hooks already filled in
 * @name: optional flash name to match against the id table (may be NULL)
 * @mode: requested read mode (normal/fast/dual/quad)
 *
 * Identifies the chip (by name and/or JEDEC ID), unlocks parts that power
 * up write-protected, sizes the mtd device, probes the devicetree for the
 * Xilinx Zynq/ZynqMP dual-flash (parallel or stacked) topology, selects
 * read/program/erase opcodes, and picks the address width (using the
 * extended address register on 3-byte-only controllers).
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
	struct flash_info *info = NULL;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	struct device_node *np_spi;
	uint64_t actual_size;
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Try the caller-supplied name first, then fall back to JEDEC ID. */
	if (name)
		info = (struct flash_info *)spi_nor_match_id(name);

	if (!info)
		info = (struct flash_info *)spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return -ENOENT;

	/*
	 * If the caller named a JEDEC chip, verify the name against the ID
	 * actually read from the device; trust the hardware over the name.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return PTR_ERR(jinfo);
		} else if (jinfo != info) {
			/* The chip disagrees with the name: use the ID match. */
			dev_warn(dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = (struct flash_info *)jinfo;
		}
	}

	mutex_init(&nor->lock);

	/*
	 * Atmel, SST, Intel/Numonyx, and SPI_NOR_HAS_LOCK parts power up
	 * with the software protection bits set; clear SR so the array is
	 * writable. SST parts may additionally need the global
	 * block-protection unlock command.
	 */
	if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
	    JEDEC_MFR(info) == SNOR_MFR_INTEL ||
	    JEDEC_MFR(info) == SNOR_MFR_SST ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		write_enable(nor);
		write_sr(nor, 0);

		if (info->flags & SST_GLOBAL_PROT_UNLK) {
			write_enable(nor);
			/* Unlock global block protection. */
			nor->write_reg(nor, GLOBAL_BLKPROT_UNLK, NULL, 0);
		}
		spi_nor_wait_till_ready(nor);
	}

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = info->sector_size * info->n_sectors;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read_ext;
	/* Single-chip size, before any dual-flash doubling below. */
	actual_size = mtd->size;

	{
#ifdef CONFIG_OF
		u32 is_dual;

		/*
		 * Xilinx Zynq/ZynqMP QSPI controllers can drive two flashes,
		 * either in parallel (byte-striped, doubled sector/page size)
		 * or stacked (two dies, doubled sector count). Probe the
		 * controller node's "is-dual"/"is-stacked" properties.
		 */
		np_spi = of_get_next_parent(np);
		if ((of_property_match_string(np_spi, "compatible",
		     "xlnx,zynq-qspi-1.0") >= 0) ||
		    (of_property_match_string(np_spi, "compatible",
		     "xlnx,zynqmp-qspi-1.0") >= 0)) {
			if (of_property_read_u32(np_spi, "is-dual",
						 &is_dual) < 0) {
				/* Property absent: plain single flash. */
				nor->shift = 0;
				nor->isstacked = 0;
				nor->isparallel = 0;
			} else {
				if (is_dual == 1) {
					/* Parallel: stripe data across chips. */
					nor->shift = 1;
					info->sector_size <<= nor->shift;
					info->page_size <<= nor->shift;
					mtd->size <<= nor->shift;
					nor->isparallel = 1;
					nor->isstacked = 0;
					nor->spi->master->flags |=
						(SPI_BOTH_FLASH
						| SPI_DATA_STRIPE);
				} else {
#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
					/* Build-time forced stacked mode. */
					nor->shift = 0;
					mtd->size <<= 1;
					info->n_sectors <<= 1;
					nor->isstacked = 1;
					nor->isparallel = 0;
#else
					u32 is_stacked;
					if (of_property_read_u32(np_spi,
							"is-stacked",
							&is_stacked) < 0) {
						is_stacked = 0;
					}
					if (is_stacked) {
						/* Stacked: double the count. */
						nor->shift = 0;
						mtd->size <<= 1;
						info->n_sectors <<= 1;
						nor->isstacked = 1;
						nor->isparallel = 0;
					} else {
						/* Single flash. */
						nor->shift = 0;
						nor->isstacked = 0;
						nor->isparallel = 0;
					}
#endif
				}
			}
		}
#else
		/* No devicetree: single flash only. */
		nor->shift = 0;
		nor->isstacked = 0;
		nor->isparallel = 0;
#endif
	}

	/* NOR protection support for STmicro/Micron chips and similar. */
	if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		nor->flash_lock = stm_lock;
		nor->flash_unlock = stm_unlock;
		nor->flash_is_locked = stm_is_locked;
	}

	nor->n_sectors = info->n_sectors;
	nor->sector_size = info->sector_size;

	/* Expose lock/unlock through mtd only when the chip supports it. */
	if (info->flags & SPI_NOR_HAS_LOCK) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* SST parts need the AAI word-program write path. */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write_ext;

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB)
		nor->flags |= SNOR_F_HAS_SR_TB;

#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* Prefer 4 KiB erase where the chip supports it. */
	if (info->flags & SECT_4K) {
		nor->erase_opcode = SPINOR_OP_BE_4K;
		/* Parallel mode doubles the effective erase size. */
		mtd->erasesize = 4096 << nor->shift;
	} else if (info->flags & SECT_4K_PMC) {
		nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
		mtd->erasesize = 4096;
	} else
#endif
	{
		nor->erase_opcode = SPINOR_OP_SE;
		mtd->erasesize = info->sector_size;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	nor->jedec_id = info->id[0];
	mtd->dev.parent = dev;
	nor->page_size = info->page_size;
	mtd->writebufsize = nor->page_size;

	if (np) {
		/* If reserved in DT, use fast read; otherwise normal read. */
		if (of_property_read_bool(np, "m25p,fast-read"))
			nor->flash_read = SPI_NOR_FAST;
		else
			nor->flash_read = SPI_NOR_NORMAL;
	} else {
		nor->flash_read = SPI_NOR_FAST;
	}

	/* Some chips cannot do fast read at all. */
	if (info->flags & SPI_NOR_NO_FR)
		nor->flash_read = SPI_NOR_NORMAL;

	/* Quad/dual read overrides, when both requested and supported. */
	if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
		ret = set_quad_mode(nor, info);
		if (ret) {
			dev_err(dev, "quad mode not supported\n");
			return ret;
		}
		nor->flash_read = SPI_NOR_QUAD;
	} else if (mode == SPI_NOR_QUAD &&
			info->flags & SPI_NOR_QUAD_IO_READ) {
		ret = set_quad_mode(nor, info);
		if (ret) {
			dev_err(dev, "quad IO mode not supported\n");
			return ret;
		}
		nor->flash_read = SPI_NOR_QUAD_IO;
	} else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
		nor->flash_read = SPI_NOR_DUAL;
	}

	/* Default commands (3-byte addressing). */
	switch (nor->flash_read) {
	case SPI_NOR_QUAD_IO:
		nor->read_opcode = SPINOR_OP_READ_1_4_4;
		break;
	case SPI_NOR_QUAD:
		nor->read_opcode = SPINOR_OP_READ_1_1_4;
		break;
	case SPI_NOR_DUAL:
		nor->read_opcode = SPINOR_OP_READ_1_1_2;
		break;
	case SPI_NOR_FAST:
		nor->read_opcode = SPINOR_OP_READ_FAST;
		break;
	case SPI_NOR_NORMAL:
		nor->read_opcode = SPINOR_OP_READ;
		break;
	default:
		dev_err(dev, "No Read opcode defined\n");
		return -EINVAL;
	}

	nor->program_opcode = SPINOR_OP_PP;

	if (info->addr_width)
		nor->addr_width = info->addr_width;
	else if (actual_size > 0x1000000) {
		/* Larger than 16 MiB: needs 4-byte addressing or the EAR. */
#ifdef CONFIG_OF
		np_spi = of_get_next_parent(np);
		if (of_property_match_string(np_spi, "compatible",
					     "xlnx,zynq-qspi-1.0") >= 0) {
			int status;

			/*
			 * Zynq QSPI only does 3-byte addresses; bank-switch
			 * through the extended address register instead.
			 */
			nor->addr_width = 3;
			set_4byte(nor, info, 0);
			status = read_ear(nor, info);
			if (status < 0)
				dev_warn(dev, "failed to read ear reg\n");
			else
				nor->curbank = status & EAR_SEGMENT_MASK;
		} else {
#endif
			nor->addr_width = 4;
			if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
				/* Dedicated 4-byte opcodes, no mode switch. */
				switch (nor->flash_read) {
				case SPI_NOR_QUAD_IO:
					nor->read_opcode = SPINOR_OP_READ4_1_4_4;
					break;
				case SPI_NOR_QUAD:
					nor->read_opcode = SPINOR_OP_READ4_1_1_4;
					break;
				case SPI_NOR_DUAL:
					nor->read_opcode = SPINOR_OP_READ4_1_1_2;
					break;
				case SPI_NOR_FAST:
					nor->read_opcode = SPINOR_OP_READ4_FAST;
					break;
				case SPI_NOR_NORMAL:
					nor->read_opcode = SPINOR_OP_READ4;
					break;
				}
				nor->program_opcode = SPINOR_OP_PP_4B;
				/* No small sector erase for 4-byte command set. */
				nor->erase_opcode = SPINOR_OP_SE_4B;
				mtd->erasesize = info->sector_size;
			} else
				set_4byte(nor, info, 1);
			if (nor->isstacked) {
				/* Switch the upper die to 4-byte mode too. */
				nor->spi->master->flags |= SPI_MASTER_U_PAGE;
				set_4byte(nor, info, 1);
				nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
			}
#ifdef CONFIG_OF
		}
#endif
	} else {
		nor->addr_width = 3;
	}

	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_err(dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	nor->read_dummy = spi_nor_read_dummy_cycles(nor);

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
			(long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
2139EXPORT_SYMBOL_GPL(spi_nor_scan);
2140
2141static const struct flash_info *spi_nor_match_id(const char *name)
2142{
2143 const struct flash_info *id = spi_nor_ids;
2144
2145 while (id->name) {
2146 if (!strcmp(name, id->name))
2147 return id;
2148 id++;
2149 }
2150 return NULL;
2151}
2152
2153void spi_nor_shutdown(struct spi_nor *nor)
2154{
2155 struct mtd_info *mtd = &nor->mtd;
2156
2157 if (nor->addr_width == 3 &&
2158 (mtd->size >> nor->shift) > 0x1000000)
2159 write_ear(nor, 0);
2160}
2161EXPORT_SYMBOL_GPL(spi_nor_shutdown);
2162
2163MODULE_LICENSE("GPL");
2164MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
2165MODULE_AUTHOR("Mike Lavender");
2166MODULE_DESCRIPTION("framework for SPI NOR");
2167