1
2
3
4
5
6
7
8
9
10#include <linux/err.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/mutex.h>
15#include <linux/math64.h>
16#include <linux/sizes.h>
17#include <linux/slab.h>
18#include <linux/sort.h>
19
20#include <linux/mtd/mtd.h>
21#include <linux/of_platform.h>
22#include <linux/spi/flash.h>
23#include <linux/mtd/spi-nor.h>
24
25
26
27
28
29
30
31#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
32
33
34
35
36
37#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
38
39#define SPI_NOR_MAX_ID_LEN 6
40#define SPI_NOR_MAX_ADDR_WIDTH 4
41
/*
 * Describes one (Fast) Read command: its opcode, the SPI protocol it is
 * issued with, and the number of mode clocks and wait states (dummy
 * cycles) the command requires before data is output.
 */
struct spi_nor_read_command {
	u8 num_mode_clocks;
	u8 num_wait_states;
	u8 opcode;
	enum spi_nor_protocol proto;
};
48
/* Describes one Page Program command: opcode and SPI protocol. */
struct spi_nor_pp_command {
	u8 opcode;
	enum spi_nor_protocol proto;
};
53
/* Indexes into spi_nor_flash_parameter.reads[], one per read protocol. */
enum spi_nor_read_command_index {
	SNOR_CMD_READ,
	SNOR_CMD_READ_FAST,
	SNOR_CMD_READ_1_1_1_DTR,

	/* Dual SPI */
	SNOR_CMD_READ_1_1_2,
	SNOR_CMD_READ_1_2_2,
	SNOR_CMD_READ_2_2_2,
	SNOR_CMD_READ_1_2_2_DTR,

	/* Quad SPI */
	SNOR_CMD_READ_1_1_4,
	SNOR_CMD_READ_1_4_4,
	SNOR_CMD_READ_4_4_4,
	SNOR_CMD_READ_1_4_4_DTR,

	/* Octal SPI */
	SNOR_CMD_READ_1_1_8,
	SNOR_CMD_READ_1_8_8,
	SNOR_CMD_READ_8_8_8,
	SNOR_CMD_READ_1_8_8_DTR,

	SNOR_CMD_READ_MAX
};
79
/* Indexes into spi_nor_flash_parameter.page_programs[]. */
enum spi_nor_pp_command_index {
	SNOR_CMD_PP,

	/* Quad SPI */
	SNOR_CMD_PP_1_1_4,
	SNOR_CMD_PP_1_4_4,
	SNOR_CMD_PP_4_4_4,

	/* Octal SPI */
	SNOR_CMD_PP_1_1_8,
	SNOR_CMD_PP_1_8_8,
	SNOR_CMD_PP_8_8_8,

	SNOR_CMD_PP_MAX
};
95
/*
 * struct spi_nor_flash_parameter - describes a SPI NOR flash memory.
 * @size: flash density in bytes.
 * @page_size: page size for Page Program commands.
 * @hwcaps: hardware capabilities supported by the flash.
 * @reads: read command settings, indexed by enum spi_nor_read_command_index.
 * @page_programs: page program settings, indexed by
 *                 enum spi_nor_pp_command_index.
 * @quad_enable: hook that sets the flash's Quad Enable bit, when needed.
 */
struct spi_nor_flash_parameter {
	u64 size;
	u32 page_size;

	struct spi_nor_hwcaps hwcaps;
	struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
	struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];

	int (*quad_enable)(struct spi_nor *nor);
};
106
/*
 * SFDP parameter table header, as laid out in the flash (JESD216).
 * The table ID is split into @id_lsb/@id_msb; @length is in DWORDs and
 * @parameter_table_pointer is a 24-bit byte address, LSB first.
 */
struct sfdp_parameter_header {
	u8 id_lsb;
	u8 minor;
	u8 major;
	u8 length;
	u8 parameter_table_pointer[3];
	u8 id_msb;
};
115
116#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
117#define SFDP_PARAM_HEADER_PTP(p) \
118 (((p)->parameter_table_pointer[2] << 16) | \
119 ((p)->parameter_table_pointer[1] << 8) | \
120 ((p)->parameter_table_pointer[0] << 0))
121
122#define SFDP_BFPT_ID 0xff00
123#define SFDP_SECTOR_MAP_ID 0xff81
124#define SFDP_4BAIT_ID 0xff84
125
126#define SFDP_SIGNATURE 0x50444653U
127#define SFDP_JESD216_MAJOR 1
128#define SFDP_JESD216_MINOR 0
129#define SFDP_JESD216A_MINOR 5
130#define SFDP_JESD216B_MINOR 6
131
/*
 * SFDP header: the "SFDP" signature, revision, and @nph, the 0-based
 * number of additional parameter headers following the mandatory
 * Basic Flash Parameter Table header.
 */
struct sfdp_header {
	u32 signature;
	u8 minor;
	u8 major;
	u8 nph;
	u8 unused;

	/* Basic Flash Parameter Table. */
	struct sfdp_parameter_header bfpt_header;
};
142
143
144
145
146
147
148
149#define BFPT_DWORD(i) ((i) - 1)
150#define BFPT_DWORD_MAX 16
151
152
153#define BFPT_DWORD_MAX_JESD216 9
154
155
156#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
157#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
158#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
159#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
160#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
161#define BFPT_DWORD1_DTR BIT(19)
162#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
163#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
164#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
165
166
167#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
168#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
169
170
171#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
172#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
208#define BFPT_DWORD15_QER_NONE (0x0UL << 20)
209#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
210#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20)
211#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
212#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
213#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20)
214
/* In-memory copy of the Basic Flash Parameter Table DWORDs. */
struct sfdp_bfpt {
	u32 dwords[BFPT_DWORD_MAX];
};
218
219
220
221
222
223
224
225
/*
 * struct spi_nor_fixups - manufacturer/chip-specific SFDP fixups.
 * @post_bfpt: called after the BFPT has been parsed so a chip entry can
 *             correct wrong or missing parameters.
 */
struct spi_nor_fixups {
	int (*post_bfpt)(struct spi_nor *nor,
			 const struct sfdp_parameter_header *bfpt_header,
			 const struct sfdp_bfpt *bfpt,
			 struct spi_nor_flash_parameter *params);
};
232
/*
 * Static description of a flash chip, used to match the device at probe
 * time and to seed default parameters before SFDP parsing.
 */
struct flash_info {
	char *name;

	/*
	 * This array stores the ID bytes: the first byte is the
	 * JEDEC manufacturer ID, followed by device ID bytes and
	 * optional extended ID bytes. @id_len is 0 for chips with
	 * no readable ID.
	 */
	u8 id[SPI_NOR_MAX_ID_LEN];
	u8 id_len;

	/*
	 * Total size = sector_size * n_sectors; @sector_size is the
	 * default (uniform) erase block size.
	 */
	unsigned sector_size;
	u16 n_sectors;

	u16 page_size;
	u16 addr_width;

	u16 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* Use SST byte programming */
#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
#define USE_FSR BIT(7) /* Use flag status register */
#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
#define SPI_NOR_HAS_TB BIT(9)
	/*
	 * SPI_NOR_HAS_TB: flash's SR has a Top/Bottom (TB) protect bit;
	 * must be used with SPI_NOR_HAS_LOCK.
	 */
#define SPI_S3AN BIT(10)
	/*
	 * SPI_S3AN: Xilinx Spartan 3AN In-System Flash; uses the same
	 * commands as serial flash but with different address layout.
	 */
#define SPI_NOR_4B_OPCODES BIT(11)
	/*
	 * SPI_NOR_4B_OPCODES: use dedicated 4-byte-address opcodes
	 * instead of entering a stateful 4-byte mode.
	 */
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* Use CLSR command (Spansion) */

	/* Optional per-chip SFDP fixup hooks. */
	const struct spi_nor_fixups *fixups;

	int (*quad_enable)(struct spi_nor *nor);
};
287
288#define JEDEC_MFR(info) ((info)->id[0])
289
290
291
292
293
294
295static int read_sr(struct spi_nor *nor)
296{
297 int ret;
298 u8 val;
299
300 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
301 if (ret < 0) {
302 pr_err("error %d reading SR\n", (int) ret);
303 return ret;
304 }
305
306 return val;
307}
308
309
310
311
312
313
314static int read_fsr(struct spi_nor *nor)
315{
316 int ret;
317 u8 val;
318
319 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
320 if (ret < 0) {
321 pr_err("error %d reading FSR\n", ret);
322 return ret;
323 }
324
325 return val;
326}
327
328
329
330
331
332
333static int read_cr(struct spi_nor *nor)
334{
335 int ret;
336 u8 val;
337
338 ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
339 if (ret < 0) {
340 dev_err(nor->dev, "error %d reading CR\n", ret);
341 return ret;
342 }
343
344 return val;
345}
346
347
348
349
350
/*
 * Write one byte to the Status Register.
 * Returns negative if an error occurred.
 */
static int write_sr(struct spi_nor *nor, u8 val)
{
	nor->cmd_buf[0] = val;
	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
}
356
357
358
359
360
/*
 * Set the Write Enable Latch with the Write Enable command.
 * Returns negative if an error occurred.
 */
static int write_enable(struct spi_nor *nor)
{
	return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
}
365
366
367
368
/* Send the Write Disable instruction to the chip. */
static int write_disable(struct spi_nor *nor)
{
	return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
}
373
/* Retrieve the spi_nor owning this mtd_info (stored in mtd->priv). */
static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
	return mtd->priv;
}
378
379
380static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
381{
382 size_t i;
383
384 for (i = 0; i < size; i++)
385 if (table[i][0] == opcode)
386 return table[i][1];
387
388
389 return opcode;
390}
391
/* Convert a 3-byte-address read opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ, SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },

		/* DTR (Double Transfer Rate) variants. */
		{ SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
410
/* Convert a 3-byte-address program opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP, SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
422
/* Convert a 3-byte-address erase opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE, SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
434
/* Switch the read/program/erase opcodes to their 4-byte-address forms. */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	/* Do some manufacturer fixups first */
	switch (JEDEC_MFR(nor->info)) {
	case SNOR_MFR_SPANSION:
		/* No small sector erase for 4-byte command set */
		nor->erase_opcode = SPINOR_OP_SE;
		nor->mtd.erasesize = nor->info->sector_size;
		break;

	default:
		break;
	}

	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	/* Non-uniform erase maps carry per-type opcodes: convert them too. */
	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}
465
466
/* Enable/disable 4-byte addressing mode. */
static int set_4byte(struct spi_nor *nor, bool enable)
{
	int status;
	bool need_wren = false;
	u8 cmd;

	switch (JEDEC_MFR(nor->info)) {
	case SNOR_MFR_ST:
	case SNOR_MFR_MICRON:
		/* Some Micron need WREN command; all will accept it */
		need_wren = true;
		/* fall through */
	case SNOR_MFR_MACRONIX:
	case SNOR_MFR_WINBOND:
		if (need_wren)
			write_enable(nor);

		cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
		status = nor->write_reg(nor, cmd, NULL, 0);
		if (need_wren)
			write_disable(nor);

		if (!status && !enable &&
		    JEDEC_MFR(nor->info) == SNOR_MFR_WINBOND) {
			/*
			 * On Winbond W25Q256FV, leaving 4byte mode causes
			 * the Extended Address Register to be set to 1, so
			 * all 3-byte-address reads come from the second 16M.
			 * We must clear the register to enable normal
			 * behavior.
			 */
			write_enable(nor);
			nor->cmd_buf[0] = 0;
			nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
			write_disable(nor);
		}

		return status;
	default:
		/* Spansion style: write the Bank Register's bit 7. */
		nor->cmd_buf[0] = enable << 7;
		return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
	}
}
510
511static int s3an_sr_ready(struct spi_nor *nor)
512{
513 int ret;
514 u8 val;
515
516 ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
517 if (ret < 0) {
518 dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
519 return ret;
520 }
521
522 return !!(val & XSR_RDY);
523}
524
/*
 * Check readiness via the Status Register.
 * Returns 1 when ready, 0 when busy, negative errno on error (including
 * a latched erase/program error on flashes using the CLSR scheme).
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int sr = read_sr(nor);
	if (sr < 0)
		return sr;

	if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
		if (sr & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		/* Clear the latched error bits before reporting failure. */
		nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
		return -EIO;
	}

	return !(sr & SR_WIP);
}
543
/*
 * Check readiness via the Flag Status Register (Micron).
 * Returns non-zero when ready, 0 when busy, negative errno on error.
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int fsr = read_fsr(nor);
	if (fsr < 0)
		return fsr;

	if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
		if (fsr & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (fsr & FSR_PT_ERR)
			dev_err(nor->dev,
			"Attempted to modify a protected sector.\n");

		/* Clear the latched error flags before reporting failure. */
		nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
		return -EIO;
	}

	return fsr & FSR_READY;
}
566
/*
 * Combined readiness check: SR (or XRDSR for S3AN parts) and, when the
 * flash uses it, the Flag Status Register as well.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int sr, fsr;

	if (nor->flags & SNOR_F_READY_XSR_RDY)
		sr = s3an_sr_ready(nor);
	else
		sr = spi_nor_sr_ready(nor);
	if (sr < 0)
		return sr;
	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
	if (fsr < 0)
		return fsr;
	return sr && fsr;
}
582
583
584
585
586
/*
 * Service routine to read the status register until ready, or timeout
 * occurs. Returns non-zero if error.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		/* One last poll is done even after the deadline passes. */
		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_err(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}
612
/* Wait for the flash to become ready, using the default timeout. */
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}
618
619
620
621
622
623
/*
 * Erase the whole flash memory.
 * Returns 0 if successful, non-zero otherwise.
 */
static int erase_chip(struct spi_nor *nor)
{
	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

	return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
}
630
631static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
632{
633 int ret = 0;
634
635 mutex_lock(&nor->lock);
636
637 if (nor->prepare) {
638 ret = nor->prepare(nor, ops);
639 if (ret) {
640 dev_err(nor->dev, "failed in the preparation.\n");
641 mutex_unlock(&nor->lock);
642 return ret;
643 }
644 }
645 return ret;
646}
647
/* Counterpart of spi_nor_lock_and_prep(): unprepare and drop the mutex. */
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	if (nor->unprepare)
		nor->unprepare(nor, ops);
	mutex_unlock(&nor->lock);
}
654
655
656
657
658
659
660
661
662
663
664static loff_t spi_nor_s3an_addr_convert(struct spi_nor *nor, unsigned int addr)
665{
666 unsigned int offset;
667 unsigned int page;
668
669 offset = addr % nor->page_size;
670 page = addr / nor->page_size;
671 page <<= (nor->page_size > 512) ? 10 : 9;
672
673 return page | offset;
674}
675
676
677
678
/*
 * Initiate the erasure of a single sector at @addr.
 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
	int i;

	if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
		addr = spi_nor_s3an_addr_convert(nor, addr);

	/* Controllers may provide their own sector erase implementation. */
	if (nor->erase)
		return nor->erase(nor, addr);

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control: serialize the address big-endian into buf[].
	 */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		buf[i] = addr & 0xff;
		addr >>= 8;
	}

	return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
}
701
702
703
704
705
706
707
708
709
/*
 * spi_nor_div_by_erase_size() - calculate remainder and quotient of
 *                               @dividend by the erase type's size.
 * @erase:	a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division (erase sizes are powers of two, so
 * mask/shift are used instead of an expensive 64-bit division).
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
717
718
719
720
721
722
723
724
725
726
727
728
729
730
/*
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *                                  offset in the serial flash memory and the
 *                                  number of bytes to erase.
 * @map:	the erase map of the SPI NOR
 * @region:	the region in which @addr lies
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0; iterate from largest to smallest.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		/* Alignment is not mandatory for overlaid regions */
		if (region->offset & SNOR_OVERLAID_REGION)
			return erase;

		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (rem)
			continue;
		else
			return erase;
	}

	return NULL;
}
769
770
771
772
773
774
775
776static struct spi_nor_erase_region *
777spi_nor_region_next(struct spi_nor_erase_region *region)
778{
779 if (spi_nor_region_is_last(region))
780 return NULL;
781 region++;
782 return region;
783}
784
785
786
787
788
789
790
791
792
793
/*
 * spi_nor_find_erase_region() - find the region of the serial flash memory
 *                               in which the offset fits.
 * @map:	the erase map of the SPI NOR
 * @addr:	offset in the serial flash memory
 *
 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	/* Linear walk: the region list is ordered by address. */
	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}
812
813
814
815
816
817
818
819
820
/*
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	/* Overlaid regions are erased in one shot, whatever their size. */
	if (region->offset & SNOR_OVERLAID_REGION)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}
842
843
844
845
846
/*
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list:	list of erase commands
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}
856
857
858
859
860
861
862
863
864
865
866
867
868
869
/*
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that
 *		the erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase
 * can be performed. On failure the partially built list is destroyed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/*
		 * Start a new command when the erase type changes or the
		 * region is overlaid; otherwise just repeat the last one.
		 */
		if (prev_erase != erase ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Move to the next region when we cross its boundary. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
924
925
926
927
928
929
930
931
932
933
934
935
/*
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we
 * validate that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
972
973
974
975
976
/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	if (spi_nor_has_uniform_erase(nor)) {
		/* The range must be a multiple of the erase block size. */
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		write_enable(nor);

		if (erase_chip(nor)) {
			ret = -EIO;
			goto erase_err;
		}

		/*
		 * Scale the timeout linearly with the size of the flash,
		 * with a minimum calibrated to an old 2MB flash. We could
		 * try to pull these from CFI/SFDP, but these values should
		 * be good enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/*
	 * REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have
	 * set up to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);

	return ret;
}
1060
1061
/*
 * Write status register @status_new, wait until the flash is ready again,
 * then read the register back and verify that the bits selected by @mask
 * actually stuck. Returns 0 on success, -EIO on mismatch, -errno on
 * transport errors.
 */
static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
{
	int ret;

	write_enable(nor);
	ret = write_sr(nor, status_new);
	if (ret)
		return ret;

	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		return ret;

	ret = read_sr(nor);
	if (ret < 0)
		return ret;

	return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
}
1081
/*
 * Decode the range currently protected by the BP{0,1,2} (and, when
 * supported, TB) bits of status register value @sr into @ofs/@len.
 */
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
				 uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	int shift = ffs(mask) - 1;
	int pow;

	if (!(sr & mask)) {
		/* No protection */
		*ofs = 0;
		*len = 0;
	} else {
		/* The protected length halves for each BP step below max. */
		pow = ((sr & mask) ^ mask) >> shift;
		*len = mtd->size >> pow;
		if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
			*ofs = 0;
		else
			*ofs = mtd->size - *len;
	}
}
1103
1104
1105
1106
1107
/*
 * Return 1 if the entire region is locked (if @locked is true) or unlocked
 * (if @locked is false); 0 otherwise.
 */
static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				    u8 sr, bool locked)
{
	loff_t lock_offs;
	uint64_t lock_len;

	/* An empty range is trivially locked/unlocked. */
	if (!len)
		return 1;

	stm_get_locked_range(nor, sr, &lock_offs, &lock_len);

	if (locked)
		/* Requested range is a sub-range of locked range */
		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
	else
		/* Requested range does not overlap with locked range */
		return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
}
1126
/* Return 1 if the whole [ofs, ofs+len) range is locked per @sr. */
static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			    u8 sr)
{
	return stm_check_lock_status_sr(nor, ofs, len, sr, true);
}
1132
/* Return 1 if the whole [ofs, ofs+len) range is unlocked per @sr. */
static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			      u8 sr)
{
	return stm_check_lock_status_sr(nor, ofs, len, sr, false);
}
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
/*
 * Lock a region of the flash. Compatible with ST Micro and similar flash.
 * Supports the block protection bits BP{0,1,2} in the status register (SR).
 * Does not support these features found in newer SR bitfields:
 *   - SEC: sector/block protect - only handle SEC=0 (block protect)
 *   - CMP: complement protect - only support CMP=0 (range is not
 *     complemented)
 *
 * Support for the following is provided conditionally for some flash:
 *   - TB: top/bottom protect
 *
 * The BP bits select a power-of-two fraction of the chip, anchored at the
 * top (or bottom when TB is set); we pick the smallest such fraction that
 * covers the requested range without locking anything the caller did not
 * ask for.
 *
 * Returns negative on errors, 0 on success.
 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is unlocked, we don't need to do anything */
	if (stm_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection */
	if (!stm_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection */
	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
			      status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	/*
	 * Need smallest pow such that:
	 *
	 *   1 / (2^pow) <= (len / size)
	 *
	 * so (assuming power-of-2 sizes) we do:
	 *
	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
	 */
	pow = ilog2(mtd->size) - ilog2(lock_len);
	val = mask - (pow << shift);
	if (val & ~mask)
		return -EINVAL;

	/* Don't "lock" with no region! */
	if (!(val & mask))
		return -EINVAL;

	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Disallow further writes if WP pin is asserted */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	return write_sr_and_check(nor, status_new, mask);
}
1245
1246
1247
1248
1249
1250
1251static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1252{
1253 struct mtd_info *mtd = &nor->mtd;
1254 int status_old, status_new;
1255 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1256 u8 shift = ffs(mask) - 1, pow, val;
1257 loff_t lock_len;
1258 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1259 bool use_top;
1260
1261 status_old = read_sr(nor);
1262 if (status_old < 0)
1263 return status_old;
1264
1265
1266 if (stm_is_unlocked_sr(nor, ofs, len, status_old))
1267 return 0;
1268
1269
1270 if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
1271 can_be_top = false;
1272
1273
1274 if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
1275 status_old))
1276 can_be_bottom = false;
1277
1278 if (!can_be_bottom && !can_be_top)
1279 return -EINVAL;
1280
1281
1282 use_top = can_be_top;
1283
1284
1285 if (use_top)
1286 lock_len = mtd->size - (ofs + len);
1287 else
1288 lock_len = ofs;
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299 pow = ilog2(mtd->size) - order_base_2(lock_len);
1300 if (lock_len == 0) {
1301 val = 0;
1302 } else {
1303 val = mask - (pow << shift);
1304
1305 if (val & ~mask)
1306 return -EINVAL;
1307 }
1308
1309 status_new = (status_old & ~mask & ~SR_TB) | val;
1310
1311
1312 if (lock_len == 0)
1313 status_new &= ~SR_SRWD;
1314
1315 if (!use_top)
1316 status_new |= SR_TB;
1317
1318
1319 if (status_new == status_old)
1320 return 0;
1321
1322
1323 if ((status_new & mask) > (status_old & mask))
1324 return -EINVAL;
1325
1326 return write_sr_and_check(nor, status_new, mask);
1327}
1328
1329
1330
1331
1332
1333
1334
1335
1336static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1337{
1338 int status;
1339
1340 status = read_sr(nor);
1341 if (status < 0)
1342 return status;
1343
1344 return stm_is_locked_sr(nor, ofs, len, status);
1345}
1346
/* MTD ->_lock() entry point: serialize and delegate to the flash hook. */
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
	if (ret)
		return ret;

	ret = nor->flash_lock(nor, ofs, len);

	/*
	 * NOTE(review): unprep is called with SPI_NOR_OPS_UNLOCK although
	 * prep used SPI_NOR_OPS_LOCK - looks inconsistent; confirm whether
	 * any prepare/unprepare implementation cares about the ops value.
	 */
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
	return ret;
}
1361
/* MTD ->_unlock() entry point: serialize and delegate to the flash hook. */
static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
	if (ret)
		return ret;

	ret = nor->flash_unlock(nor, ofs, len);

	/* NOTE(review): ops mismatch with prep - see spi_nor_lock(). */
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
	return ret;
}
1376
/* MTD ->_is_locked() entry point: serialize and query the flash hook. */
static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
	if (ret)
		return ret;

	ret = nor->flash_is_locked(nor, ofs, len);

	/* NOTE(review): ops mismatch with prep - see spi_nor_lock(). */
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
	return ret;
}
1391
1392
1393
1394
1395
1396
1397
/*
 * write_sr_cr() - Write the Status Register 1 and the Configuration
 * Register in one shot.
 * @nor: pointer to a 'struct spi_nor'
 * @sr_cr: pointer to a buffer where sr_cr[0] is the Status Register 1 value
 *         and sr_cr[1] the Configuration Register value.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
{
	int ret;

	write_enable(nor);

	ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
	if (ret < 0) {
		dev_err(nor->dev,
			"error while writing configuration register\n");
		return -EINVAL;
	}

	ret = spi_nor_wait_till_ready(nor);
	if (ret) {
		dev_err(nor->dev,
			"timeout while writing configuration register\n");
		return ret;
	}

	return 0;
}
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431static int macronix_quad_enable(struct spi_nor *nor)
1432{
1433 int ret, val;
1434
1435 val = read_sr(nor);
1436 if (val < 0)
1437 return val;
1438 if (val & SR_QUAD_EN_MX)
1439 return 0;
1440
1441 write_enable(nor);
1442
1443 write_sr(nor, val | SR_QUAD_EN_MX);
1444
1445 ret = spi_nor_wait_till_ready(nor);
1446 if (ret)
1447 return ret;
1448
1449 ret = read_sr(nor);
1450 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1451 dev_err(nor->dev, "Macronix Quad bit not set\n");
1452 return -EINVAL;
1453 }
1454
1455 return 0;
1456}
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
/*
 * spansion_quad_enable() - set the QE bit in the Configuration Register.
 * @nor: pointer to a 'struct spi_nor'
 *
 * Writes SR1=0 and CR=QE in one shot, then reads the Configuration
 * Register back to verify the bit stuck. Note this variant clobbers the
 * other Status Register 1 bits (they are written as 0).
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_quad_enable(struct spi_nor *nor)
{
	u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN};
	int ret;

	ret = write_sr_cr(nor, sr_cr);
	if (ret)
		return ret;

	/* read back and check it */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
/*
 * spansion_no_read_cr_quad_enable() - set the QE bit for flashes whose
 * Configuration Register cannot be read back.
 * @nor: pointer to a 'struct spi_nor'
 *
 * Preserves Status Register 1 by reading it first, then writes SR1+CR
 * with the QE bit set. No read-back verification is possible.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
{
	u8 sr_cr[2];
	int ret;

	/* Keep the current value of the Status Register. */
	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(nor->dev, "error while reading status register\n");
		return -EINVAL;
	}
	sr_cr[0] = ret;
	sr_cr[1] = CR_QUAD_EN_SPAN;

	return write_sr_cr(nor, sr_cr);
}
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
/*
 * spansion_read_cr_quad_enable() - set the QE bit, preserving both the
 * Status and Configuration Registers.
 * @nor: pointer to a 'struct spi_nor'
 *
 * Reads CR and SR1 first so no other bit is clobbered, writes them back
 * with QE set, then reads CR again to verify the bit stuck.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_read_cr_quad_enable(struct spi_nor *nor)
{
	struct device *dev = nor->dev;
	u8 sr_cr[2];
	int ret;

	/* Check current Quad Enable bit value. */
	ret = read_cr(nor);
	if (ret < 0) {
		dev_err(dev, "error while reading configuration register\n");
		return -EINVAL;
	}

	if (ret & CR_QUAD_EN_SPAN)
		return 0;

	sr_cr[1] = ret | CR_QUAD_EN_SPAN;

	/* Keep the current value of the Status Register. */
	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(dev, "error while reading status register\n");
		return -EINVAL;
	}
	sr_cr[0] = ret;

	ret = write_sr_cr(nor, sr_cr);
	if (ret)
		return ret;

	/* Read back and check it. */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595static int sr2_bit7_quad_enable(struct spi_nor *nor)
1596{
1597 u8 sr2;
1598 int ret;
1599
1600
1601 ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
1602 if (ret)
1603 return ret;
1604 if (sr2 & SR2_QUAD_EN_BIT7)
1605 return 0;
1606
1607
1608 sr2 |= SR2_QUAD_EN_BIT7;
1609
1610 write_enable(nor);
1611
1612 ret = nor->write_reg(nor, SPINOR_OP_WRSR2, &sr2, 1);
1613 if (ret < 0) {
1614 dev_err(nor->dev, "error while writing status register 2\n");
1615 return -EINVAL;
1616 }
1617
1618 ret = spi_nor_wait_till_ready(nor);
1619 if (ret < 0) {
1620 dev_err(nor->dev, "timeout while writing status register 2\n");
1621 return ret;
1622 }
1623
1624
1625 ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
1626 if (!(ret > 0 && (sr2 & SR2_QUAD_EN_BIT7))) {
1627 dev_err(nor->dev, "SR2 Quad bit not set\n");
1628 return -EINVAL;
1629 }
1630
1631 return 0;
1632}
1633
1634
/*
 * Used by most "normal" entries of spi_nor_ids[]: builds id[] from a
 * 3-byte JEDEC ID plus an optional 2-byte extended ID.
 */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff, \
		((_ext_id) >> 8) & 0xff, \
		(_ext_id) & 0xff, \
	}, \
	.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = 256, \
	.flags = (_flags),

/* Same as INFO() but with a 3-byte extended ID (6 ID bytes total). */
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff, \
		((_ext_id) >> 16) & 0xff, \
		((_ext_id) >> 8) & 0xff, \
		(_ext_id) & 0xff, \
	}, \
	.id_len = 6, \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = 256, \
	.flags = (_flags),

/* For CAT25-style EEPROMs with no JEDEC ID. */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = (_page_size), \
	.addr_width = (_addr_width), \
	.flags = (_flags),

/* For Xilinx S3AN In-System Flash: sector = 8 pages, 3-byte addressing. */
#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff \
	}, \
	.id_len = 3, \
	.sector_size = (8*_page_size), \
	.n_sectors = (_n_sectors), \
	.page_size = _page_size, \
	.addr_width = 3, \
	.flags = SPI_NOR_NO_FR | SPI_S3AN,
1683
static int
mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
			    const struct sfdp_parameter_header *bfpt_header,
			    const struct sfdp_bfpt *bfpt,
			    struct spi_nor_flash_parameter *params)
{
	/*
	 * MX25L25635F supports 4B opcodes but MX25L25635E does not.
	 * Unfortunately, Macronix has re-used the same JEDEC ID for both
	 * variants which prevents us from defining a new entry in the parts
	 * table.
	 * We need a way to differentiate MX25L25635E and MX25L25635F, and it
	 * seems that the F version advertises support for Fast Read 4-4-4 in
	 * its BFPT table.
	 */
	if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
		nor->flags |= SNOR_F_4B_OPCODES;

	return 0;
}
1704
/* Hook set applied to the shared mx25l25635e/f flash_info entry below. */
static struct spi_nor_fixups mx25l25635_fixups = {
	.post_bfpt = mx25l25635_post_bfpt_fixups,
};
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
/*
 * NOTE: double check command sets and memory organization when you add
 * more nor chips.  This table is sorted by vendor; within a vendor the
 * entries are matched in order by spi_nor_read_id(), so more specific
 * IDs must come before shorter/overlapping ones.
 */
static const struct flash_info spi_nor_ids[] = {
	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
	{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
	{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },

	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
	{ "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
	{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },

	{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
	{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },

	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },

	/* EON -- en25xxx */
	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
	{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
	{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
	{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
	{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },

	/* ESMT */
	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
	{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
	{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Fujitsu */
	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },

	/* GigaDevice */
	{
		"gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
		.quad_enable = macronix_quad_enable,
	},

	/* Intel/Numonyx -- xxxs33b */
	{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
	{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
	{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },

	/* ISSI */
	{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
	{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_4B_OPCODES) },
	{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* Macronix */
	{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
	{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
	{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
	{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
	{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
	{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
	{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
	{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
	{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
	{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
	{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
	{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
			 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
	  .fixups = &mx25l25635_fixups },
	{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },

	/* Micron <--> ST Micro */
	{ "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
	{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
	{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
	{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
	{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
	{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
	{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },

	/* Micron */
	{
		"mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
			SECT_4K | USE_FSR | SPI_NOR_4B_OPCODES)
	},

	/* PMC */
	{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
	{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
	{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },

	/*
	 * Spansion/Cypress -- single (large) sector size only, at least
	 * for the chips listed here (without boot sectors).
	 */
	{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
	{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
	{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
	{ "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
	{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
	{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
	{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
	{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
	{ "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{ "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
	{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
	{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
	{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
	{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },

	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
	{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
	{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
	{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
	{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
	{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* ST Microelectronics -- newer production may have feature updates */
	{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
	{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
	{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
	{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
	{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
	{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
	{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
	{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
	{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },

	{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
	{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
	{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
	{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
	{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
	{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
	{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
	{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },

	{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
	{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
	{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },

	{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
	{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
	{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },

	{ "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
	{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
	{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },

	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
	{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
	{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
	{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
	{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
	{
		"w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
	{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
	{
		"w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{
		"w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
			SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },

	/* Catalyst / On Semiconductor -- non-JEDEC */
	{ "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Xilinx S3AN Internal Flash */
	{ "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
	{ "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
	{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
	{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
	{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },

	/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ },
};
2036
2037static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2038{
2039 int tmp;
2040 u8 id[SPI_NOR_MAX_ID_LEN];
2041 const struct flash_info *info;
2042
2043 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
2044 if (tmp < 0) {
2045 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
2046 return ERR_PTR(tmp);
2047 }
2048
2049 for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
2050 info = &spi_nor_ids[tmp];
2051 if (info->id_len) {
2052 if (!memcmp(info->id, id, info->id_len))
2053 return &spi_nor_ids[tmp];
2054 }
2055 }
2056 dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
2057 id[0], id[1], id[2]);
2058 return ERR_PTR(-ENODEV);
2059}
2060
2061static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
2062 size_t *retlen, u_char *buf)
2063{
2064 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2065 int ret;
2066
2067 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
2068
2069 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
2070 if (ret)
2071 return ret;
2072
2073 while (len) {
2074 loff_t addr = from;
2075
2076 if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
2077 addr = spi_nor_s3an_addr_convert(nor, addr);
2078
2079 ret = nor->read(nor, addr, len, buf);
2080 if (ret == 0) {
2081
2082 ret = -EIO;
2083 goto read_err;
2084 }
2085 if (ret < 0)
2086 goto read_err;
2087
2088 WARN_ON(ret > len);
2089 *retlen += ret;
2090 buf += ret;
2091 from += ret;
2092 len -= ret;
2093 }
2094 ret = 0;
2095
2096read_err:
2097 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
2098 return ret;
2099}
2100
2101static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
2102 size_t *retlen, const u_char *buf)
2103{
2104 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2105 size_t actual;
2106 int ret;
2107
2108 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2109
2110 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
2111 if (ret)
2112 return ret;
2113
2114 write_enable(nor);
2115
2116 nor->sst_write_second = false;
2117
2118 actual = to % 2;
2119
2120 if (actual) {
2121 nor->program_opcode = SPINOR_OP_BP;
2122
2123
2124 ret = nor->write(nor, to, 1, buf);
2125 if (ret < 0)
2126 goto sst_write_err;
2127 WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2128 (int)ret);
2129 ret = spi_nor_wait_till_ready(nor);
2130 if (ret)
2131 goto sst_write_err;
2132 }
2133 to += actual;
2134
2135
2136 for (; actual < len - 1; actual += 2) {
2137 nor->program_opcode = SPINOR_OP_AAI_WP;
2138
2139
2140 ret = nor->write(nor, to, 2, buf + actual);
2141 if (ret < 0)
2142 goto sst_write_err;
2143 WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
2144 (int)ret);
2145 ret = spi_nor_wait_till_ready(nor);
2146 if (ret)
2147 goto sst_write_err;
2148 to += 2;
2149 nor->sst_write_second = true;
2150 }
2151 nor->sst_write_second = false;
2152
2153 write_disable(nor);
2154 ret = spi_nor_wait_till_ready(nor);
2155 if (ret)
2156 goto sst_write_err;
2157
2158
2159 if (actual != len) {
2160 write_enable(nor);
2161
2162 nor->program_opcode = SPINOR_OP_BP;
2163 ret = nor->write(nor, to, 1, buf + actual);
2164 if (ret < 0)
2165 goto sst_write_err;
2166 WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2167 (int)ret);
2168 ret = spi_nor_wait_till_ready(nor);
2169 if (ret)
2170 goto sst_write_err;
2171 write_disable(nor);
2172 actual += 1;
2173 }
2174sst_write_err:
2175 *retlen += actual;
2176 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
2177 return ret;
2178}
2179
2180
2181
2182
2183
2184
/*
 * Write an address range to the flash chip. Data must be written in
 * FLASH_PAGESIZE chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 * Power of two numbers have only one bit set and we can use
		 * the instruction hweight32 to detect if we need to do a
		 * modulus (do_div()) or not.
		 */
		if (hweight32(nor->page_size) == 1) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			page_offset = do_div(aux, nor->page_size);
		}
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		/* S3AN parts in default addressing mode need address translation. */
		if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
			addr = spi_nor_s3an_addr_convert(nor, addr);

		write_enable(nor);
		ret = nor->write(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
2241
2242static int spi_nor_check(struct spi_nor *nor)
2243{
2244 if (!nor->dev || !nor->read || !nor->write ||
2245 !nor->read_reg || !nor->write_reg) {
2246 pr_err("spi-nor: please fill all the necessary fields!\n");
2247 return -EINVAL;
2248 }
2249
2250 return 0;
2251}
2252
/* Probe a Xilinx S3AN flash's addressing mode and adjust the geometry. */
static int s3an_nor_scan(struct spi_nor *nor)
{
	int ret;
	u8 val;

	ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
	if (ret < 0) {
		dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
		return ret;
	}

	nor->erase_opcode = SPINOR_OP_XSE;
	nor->program_opcode = SPINOR_OP_XPP;
	nor->read_opcode = SPINOR_OP_READ;
	nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;

	/*
	 * This flashes have a page size of 264 or 528 bytes (known as
	 * Default addressing mode). It can be changed to a more standard
	 * Power of two mode where the page size is 256/512. This comes
	 * with a price: there is 3% less of space, the data is corrupted
	 * and the page size cannot be changed back to default addressing
	 * mode.
	 *
	 * The current addressing mode can be read from the XRDSR register
	 * and should not be changed, because is a destructive operation.
	 */
	if (val & XSR_PAGESIZE) {
		/* Flash in Power of 2 mode */
		nor->page_size = (nor->page_size == 264) ? 256 : 512;
		nor->mtd.writebufsize = nor->page_size;
		nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
		nor->mtd.erasesize = 8 * nor->page_size;
	} else {
		/* Flash in Default addressing mode */
		nor->flags |= SNOR_F_S3AN_ADDR_DEFAULT;
	}

	return 0;
}
2293
2294static void
2295spi_nor_set_read_settings(struct spi_nor_read_command *read,
2296 u8 num_mode_clocks,
2297 u8 num_wait_states,
2298 u8 opcode,
2299 enum spi_nor_protocol proto)
2300{
2301 read->num_mode_clocks = num_mode_clocks;
2302 read->num_wait_states = num_wait_states;
2303 read->opcode = opcode;
2304 read->proto = proto;
2305}
2306
2307static void
2308spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2309 u8 opcode,
2310 enum spi_nor_protocol proto)
2311{
2312 pp->opcode = opcode;
2313 pp->proto = proto;
2314}
2315
2316static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2317{
2318 size_t i;
2319
2320 for (i = 0; i < size; i++)
2321 if (table[i][0] == (int)hwcaps)
2322 return table[i][1];
2323
2324 return -EINVAL;
2325}
2326
/* Map one SNOR_HWCAPS_READ_* bit to its SNOR_CMD_READ_* index. */
static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2350
/* Map one SNOR_HWCAPS_PP_* bit to its SNOR_CMD_PP_* index. */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
2384{
2385 int ret;
2386
2387 while (len) {
2388 ret = nor->read(nor, addr, len, buf);
2389 if (!ret || ret > len)
2390 return -EIO;
2391 if (ret < 0)
2392 return ret;
2393
2394 buf += ret;
2395 addr += ret;
2396 len -= ret;
2397 }
2398 return 0;
2399}
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
/*
 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the SFDP area to start reading data from
 * @len:	number of bytes to read
 * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
 *
 * The Read SFDP instruction always uses a 3-byte address and 8 dummy
 * clock cycles, regardless of the flash's regular read settings, so
 * those settings are saved, overridden for the transfer, and restored.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
			     size_t len, void *buf)
{
	u8 addr_width, read_opcode, read_dummy;
	int ret;

	/* Save the current read settings so they can be restored below. */
	read_opcode = nor->read_opcode;
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;

	/* Fixed Read SFDP protocol: 3 address bytes, 8 dummy cycles. */
	nor->read_opcode = SPINOR_OP_RDSFDP;
	nor->addr_width = 3;
	nor->read_dummy = 8;

	ret = spi_nor_read_raw(nor, addr, len, buf);

	/* Restore the flash's regular read settings. */
	nor->read_opcode = read_opcode;
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;

	return ret;
}
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
2451 size_t len, void *buf)
2452{
2453 void *dma_safe_buf;
2454 int ret;
2455
2456 dma_safe_buf = kmalloc(len, GFP_KERNEL);
2457 if (!dma_safe_buf)
2458 return -ENOMEM;
2459
2460 ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
2461 memcpy(buf, dma_safe_buf, len);
2462 kfree(dma_safe_buf);
2463
2464 return ret;
2465}
2466
2467
2468
2469static void
2470spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
2471 u16 half,
2472 enum spi_nor_protocol proto)
2473{
2474 read->num_mode_clocks = (half >> 5) & 0x07;
2475 read->num_wait_states = (half >> 0) & 0x1f;
2476 read->opcode = (half >> 8) & 0xff;
2477 read->proto = proto;
2478}
2479
struct sfdp_bfpt_read {
	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
	u32			hwcaps;

	/*
	 * The <supported_bit> bit in the <supported_dword> BFPT DWORD tells
	 * us whether the Fast Read x-y-z command is supported.
	 */
	u32			supported_dword;
	u32			supported_bit;

	/*
	 * The half-word at offset <settings_shift> in the <settings_dword>
	 * BFPT DWORD encodes the op code, the number of mode clocks and the
	 * number of wait states to be used by the Fast Read x-y-z command.
	 */
	u32			settings_dword;
	u32			settings_shift;

	/* The SPI protocol for this Fast Read x-y-z command. */
	enum spi_nor_protocol	proto;
};
2502
/* Fast Read capability/settings locations within the BFPT, per JESD216. */
static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
	/* Fast Read 1-1-2 */
	{
		SNOR_HWCAPS_READ_1_1_2,
		BFPT_DWORD(1), BIT(16),	/* Supported bit */
		BFPT_DWORD(4), 0,	/* Settings */
		SNOR_PROTO_1_1_2,
	},

	/* Fast Read 1-2-2 */
	{
		SNOR_HWCAPS_READ_1_2_2,
		BFPT_DWORD(1), BIT(20),	/* Supported bit */
		BFPT_DWORD(4), 16,	/* Settings */
		SNOR_PROTO_1_2_2,
	},

	/* Fast Read 2-2-2 */
	{
		SNOR_HWCAPS_READ_2_2_2,
		BFPT_DWORD(5),  BIT(0),	/* Supported bit */
		BFPT_DWORD(6), 16,	/* Settings */
		SNOR_PROTO_2_2_2,
	},

	/* Fast Read 1-1-4 */
	{
		SNOR_HWCAPS_READ_1_1_4,
		BFPT_DWORD(1), BIT(22),	/* Supported bit */
		BFPT_DWORD(3), 16,	/* Settings */
		SNOR_PROTO_1_1_4,
	},

	/* Fast Read 1-4-4 */
	{
		SNOR_HWCAPS_READ_1_4_4,
		BFPT_DWORD(1), BIT(21),	/* Supported bit */
		BFPT_DWORD(3), 0,	/* Settings */
		SNOR_PROTO_1_4_4,
	},

	/* Fast Read 4-4-4 */
	{
		SNOR_HWCAPS_READ_4_4_4,
		BFPT_DWORD(5), BIT(4),	/* Supported bit */
		BFPT_DWORD(7), 16,	/* Settings */
		SNOR_PROTO_4_4_4,
	},
};
2552
struct sfdp_bfpt_erase {
	/*
	 * The half-word at offset <shift> in DWORD <dword> encodes the
	 * erase size (in power-of-2 units) and the erase op code.
	 */
	u32			dword;
	u32			shift;
};

/* Locations of the four BFPT Erase Types, per JESD216. */
static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
	/* Erase Type 1 in DWORD8 bits[15:0] */
	{BFPT_DWORD(8), 0},

	/* Erase Type 2 in DWORD8 bits[31:16] */
	{BFPT_DWORD(8), 16},

	/* Erase Type 3 in DWORD9 bits[15:0] */
	{BFPT_DWORD(9), 0},

	/* Erase Type 4 in DWORD9 bits[31:16] */
	{BFPT_DWORD(9), 16},
};
2575
2576
2577
2578
2579
2580
2581
2582static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
2583 u32 size, u8 opcode)
2584{
2585 erase->size = size;
2586 erase->opcode = opcode;
2587
2588 erase->size_shift = ffs(erase->size) - 1;
2589 erase->size_mask = (1 << erase->size_shift) - 1;
2590}
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
/*
 * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @size:	the size of the sector/block erased by the erase type
 * @opcode:	the SPI command op code to erase the sector/block
 * @i:		erase type index as sorted in the Basic Flash Parameter Table
 *
 * The erase types will later be sorted by size, so the BFPT index @i is
 * saved in erase->idx to allow synchronizing with erase types defined in
 * SFDP optional tables afterwards.
 */
static void
spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
				     u32 size, u8 opcode, u8 i)
{
	erase->idx = i;
	spi_nor_set_erase_type(erase, size, opcode);
}
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
2625{
2626 const struct spi_nor_erase_type *left = l, *right = r;
2627
2628 return left->size - right->size;
2629}
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
2643{
2644 struct spi_nor_erase_type *erase_type = map->erase_type;
2645 int i;
2646 u8 sorted_erase_mask = 0;
2647
2648 if (!erase_mask)
2649 return 0;
2650
2651
2652 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
2653 if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
2654 sorted_erase_mask |= BIT(i);
2655
2656 return sorted_erase_mask;
2657}
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
/*
 * spi_nor_regions_sort_erase_types() - sort erase types in each region
 * @map:	the erase map of the SPI NOR
 *
 * Assumes the erase types in map->erase_type are already sorted by
 * ascending size. Rewrites every region's erase-type mask (kept in the
 * low SNOR_ERASE_TYPE_MASK bits of region->offset) so its bit positions
 * match the sorted erase_type array.
 */
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
	struct spi_nor_erase_region *region = map->regions;
	u8 region_erase_mask, sorted_erase_mask;

	while (region) {
		region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

		sorted_erase_mask = spi_nor_sort_erase_mask(map,
							    region_erase_mask);

		/* Overwrite erase mask. */
		region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
				 sorted_erase_mask;

		region = spi_nor_region_next(region);
	}
}
2689
2690
2691
2692
2693
2694
2695
2696
/*
 * spi_nor_init_uniform_erase_map() - initialize a uniform erase map
 * @map:	the erase map of the SPI NOR
 * @erase_mask:	bitmask encoding the erase types that can erase the entire
 *		flash memory
 * @flash_size:	the spi nor flash memory size
 *
 * Describes the whole flash as a single region, tagging it as the last
 * one and storing the erase-type mask in the region's offset low bits.
 */
static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
					   u8 erase_mask, u64 flash_size)
{
	/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set. */
	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
				     SNOR_LAST_REGION;
	map->uniform_region.size = flash_size;
	map->regions = &map->uniform_region;
	map->uniform_erase_type = erase_mask;
}
2707
2708static int
2709spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2710 const struct sfdp_parameter_header *bfpt_header,
2711 const struct sfdp_bfpt *bfpt,
2712 struct spi_nor_flash_parameter *params)
2713{
2714 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2715 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2716 params);
2717
2718 return 0;
2719}
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
/*
 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
 * @nor:		pointer to a 'struct spi_nor'
 * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Basic Flash Parameter Table length and version
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled
 *
 * The BFPT is the only mandatory SFDP table. This fills the flash size, the
 * supported (Fast) Read commands and the erase types; for tables long enough
 * (JESD216 rev A or later) it also fills the page size and the Quad Enable
 * method.
 *
 * Return: 0 on success, -EINVAL on malformed tables, or a read error.
 */
static int spi_nor_parse_bfpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *bfpt_header,
			      struct spi_nor_flash_parameter *params)
{
	struct spi_nor_erase_map *map = &nor->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	struct sfdp_bfpt bfpt;
	size_t len;
	int i, cmd, err;
	u32 addr;
	u16 half;
	u8 erase_mask;

	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
		return -EINVAL;

	/* Read the Basic Flash Parameter Table (clamped to our struct). */
	len = min_t(size_t, sizeof(bfpt),
		    bfpt_header->length * sizeof(u32));
	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
	memset(&bfpt, 0, sizeof(bfpt));
	err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
	if (err < 0)
		return err;

	/* Fix endianness of the BFPT DWORDs. */
	for (i = 0; i < BFPT_DWORD_MAX; i++)
		bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);

	/* Number of address bytes. */
	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
		nor->addr_width = 3;
		break;

	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
		nor->addr_width = 4;
		break;

	default:
		/* 3- or 4-byte capable: keep the current address width. */
		break;
	}

	/* Flash Memory Density (in bits). */
	params->size = bfpt.dwords[BFPT_DWORD(2)];
	if (params->size & BIT(31)) {
		params->size &= ~BIT(31);

		/*
		 * With BIT(31) set, the field holds N for a density of 2^N
		 * bits. Prevent overflows on params->size: a NOR of 2^64 bits
		 * is unlikely to exist, so such a value probably means the
		 * BFPT we are reading is corrupted/wrong.
		 */
		if (params->size > 63)
			return -EINVAL;

		params->size = 1ULL << params->size;
	} else {
		/* Otherwise the field holds (density in bits) - 1. */
		params->size++;
	}
	/* Convert the density from bits to bytes. */
	params->size >>= 3;

	/* Fast Read settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
		struct spi_nor_read_command *read;

		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
			/* This Fast Read mode is not advertised: drop it. */
			params->hwcaps.mask &= ~rd->hwcaps;
			continue;
		}

		params->hwcaps.mask |= rd->hwcaps;
		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
		read = &params->reads[cmd];
		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
	}

	/*
	 * Sector Erase settings. Reinitialize the uniform erase map using the
	 * Erase Types defined in the BFPT.
	 */
	erase_mask = 0;
	memset(&nor->erase_map, 0, sizeof(nor->erase_map));
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
		u32 erasesize;
		u8 opcode;

		half = bfpt.dwords[er->dword] >> er->shift;
		erasesize = half & 0xff;

		/* erasesize == 0 means this Erase Type is not supported. */
		if (!erasesize)
			continue;

		/* The erase size is encoded as a power of two. */
		erasesize = 1U << erasesize;
		opcode = (half >> 8) & 0xff;
		erase_mask |= BIT(i);
		spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
						     opcode, i);
	}
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
	/*
	 * Sort all the map's Erase Types in ascending order, the smallest
	 * erase size becoming the first member of the erase_type array.
	 */
	sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
	     spi_nor_map_cmp_erase_type, NULL);
	/*
	 * Sort the erase type bits of the uniform region accordingly and
	 * refresh the uniform_erase_type bitmask, used later when selecting
	 * the uniform erase command.
	 */
	spi_nor_regions_sort_erase_types(map);
	map->uniform_erase_type = map->uniform_region.offset &
				  SNOR_ERASE_TYPE_MASK;

	/* Stop here if the table is not JESD216 rev A or later. */
	if (bfpt_header->length < BFPT_DWORD_MAX)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
						params);

	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
	params->page_size = bfpt.dwords[BFPT_DWORD(11)];
	params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
	params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
	params->page_size = 1U << params->page_size;

	/* Quad Enable Requirements. */
	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		params->quad_enable = NULL;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		params->quad_enable = spansion_no_read_cr_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR1_BIT6:
		params->quad_enable = macronix_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT7:
		params->quad_enable = sr2_bit7_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1:
		params->quad_enable = spansion_read_cr_quad_enable;
		break;

	default:
		return -EINVAL;
	}

	return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
}
2911
/*
 * Sector Map Parameter Table (SMPT) field extractors.
 *
 * A configuration-detection command descriptor packs, in one DWORD, the
 * address length, dummy-cycle count, read opcode and the data bit to test
 * when probing the flash for the sector map currently in use.
 */
#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)

/* Dummy-cycle count of the detection read; 0xf means "variable". */
#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
#define SMPT_CMD_READ_DUMMY_SHIFT 16
#define SMPT_CMD_READ_DUMMY(_cmd) \
 (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL

/* Mask selecting the data bit tested in the byte read back. */
#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
#define SMPT_CMD_READ_DATA_SHIFT 24
#define SMPT_CMD_READ_DATA(_cmd) \
 (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)

/* Opcode of the detection read command. */
#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
#define SMPT_CMD_OPCODE_SHIFT 8
#define SMPT_CMD_OPCODE(_cmd) \
 (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)

/* Map descriptor header: the region count is stored minus one. */
#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
#define SMPT_MAP_REGION_COUNT_SHIFT 16
#define SMPT_MAP_REGION_COUNT(_header) \
 ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
 SMPT_MAP_REGION_COUNT_SHIFT) + 1)

/* Configuration ID a detection-built map_id is matched against. */
#define SMPT_MAP_ID_MASK GENMASK(15, 8)
#define SMPT_MAP_ID_SHIFT 8
#define SMPT_MAP_ID(_header) \
 (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)

/* Region size is stored in units of 256 bytes, minus one. */
#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
#define SMPT_MAP_REGION_SIZE_SHIFT 8
#define SMPT_MAP_REGION_SIZE(_region) \
 (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
 SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)

/* Bitmask of the erase types supported by a region. */
#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
 ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)

/* Descriptor flags: bit 1 = map descriptor, bit 0 = last descriptor. */
#define SMPT_DESC_TYPE_MAP BIT(1)
#define SMPT_DESC_END BIT(0)
2957
2958
2959
2960
2961
2962
2963
2964static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
2965{
2966 switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
2967 case SMPT_CMD_ADDRESS_LEN_0:
2968 return 0;
2969 case SMPT_CMD_ADDRESS_LEN_3:
2970 return 3;
2971 case SMPT_CMD_ADDRESS_LEN_4:
2972 return 4;
2973 case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
2974
2975 default:
2976 return nor->addr_width;
2977 }
2978}
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
2989{
2990 u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
2991
2992 if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
2993 return nor->read_dummy;
2994 return read_dummy;
2995}
2996
2997
2998
2999
3000
3001
3002
3003
3004
/*
 * spi_nor_get_map_in_use() - get the configuration map in use
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the sector map parameter table (host endianness)
 * @smpt_len:	sector map parameter table length, in DWORDs
 *
 * Execute the optional detection command descriptors at the start of the
 * SMPT to build the configuration selector (map_id), then look up the map
 * descriptor whose ID matches it.
 *
 * Return: pointer to the map descriptor in use on success, an error pointer
 *	   otherwise.
 */
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
					 u8 smpt_len)
{
	const u32 *ret;
	u8 *buf;
	u32 addr;
	int err;
	u8 i;
	u8 addr_width, read_opcode, read_dummy;
	u8 read_data_mask, map_id;

	/* kmalloc'ed single-byte bounce buffer for the raw read. */
	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* Save the read settings: they are clobbered while probing below. */
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;
	read_opcode = nor->read_opcode;

	map_id = 0;
	/* Walk the optional detection command descriptors (pairs of DWORDs). */
	for (i = 0; i < smpt_len; i += 2) {
		/* The first map descriptor ends the detection section. */
		if (smpt[i] & SMPT_DESC_TYPE_MAP)
			break;

		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
		addr = smpt[i + 1];

		err = spi_nor_read_raw(nor, addr, 1, buf);
		if (err) {
			ret = ERR_PTR(err);
			goto out;
		}

		/*
		 * Build an index value that is used to select the Sector Map
		 * Configuration that is currently in use: one bit per
		 * detection command, MSB first.
		 */
		map_id = map_id << 1 | !!(*buf & read_data_mask);
	}

	/*
	 * If command descriptors are provided, they always precede map
	 * descriptors in the table, so there is no need to restart the
	 * iteration over the smpt array: find the matching configuration map
	 * from index i onward.
	 */
	ret = ERR_PTR(-EINVAL);
	while (i < smpt_len) {
		if (SMPT_MAP_ID(smpt[i]) == map_id) {
			ret = smpt + i;
			break;
		}

		/*
		 * If there are no more configuration map descriptors and no
		 * configuration ID matched the configuration identifier, the
		 * sector address map is unknown.
		 */
		if (smpt[i] & SMPT_DESC_END)
			break;

		/* Increment the table index to the next map descriptor. */
		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
	}

	/* fall through */
out:
	kfree(buf);
	/* Restore the caller's read settings. */
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;
	nor->read_opcode = read_opcode;
	return ret;
}
3084
3085
3086
3087
3088
3089
3090
3091static void
3092spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
3093 const struct spi_nor_erase_type *erase,
3094 const u8 erase_type)
3095{
3096 int i;
3097
3098 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3099 if (!(erase_type & BIT(i)))
3100 continue;
3101 if (region->size & erase[i].size_mask) {
3102 spi_nor_region_mark_overlay(region);
3103 return;
3104 }
3105 }
3106}
3107
3108
3109
3110
3111
3112
3113
3114
/*
 * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the map descriptor in use (see
 *		spi_nor_get_map_in_use())
 *
 * Translate the SMPT map descriptor into the driver's erase-region array and
 * prune the erase types that no region of this configuration supports.
 *
 * Return: 0 on success, -ENOMEM or -EINVAL otherwise.
 */
static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
					      const u32 *smpt)
{
	struct spi_nor_erase_map *map = &nor->erase_map;
	struct spi_nor_erase_type *erase = map->erase_type;
	struct spi_nor_erase_region *region;
	u64 offset;
	u32 region_count;
	int i, j;
	u8 uniform_erase_type, save_uniform_erase_type;
	u8 erase_type, regions_erase_type;

	region_count = SMPT_MAP_REGION_COUNT(*smpt);
	/*
	 * The regions are device-managed: they are freed when the driver
	 * detaches from the device.
	 */
	region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
			      GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	map->regions = region;

	uniform_erase_type = 0xff;
	regions_erase_type = 0;
	offset = 0;
	/* Populate regions. */
	for (i = 0; i < region_count; i++) {
		j = i + 1; /* index for the region dword */
		region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
		erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
		region[i].offset = offset | erase_type;

		spi_nor_region_check_overlay(&region[i], erase, erase_type);

		/*
		 * Accumulate the erase types supported in ALL regions, i.e.
		 * those that could erase the entire flash memory.
		 */
		uniform_erase_type &= erase_type;

		/*
		 * regions_erase_type accumulates every erase type supported
		 * by at least one region of this configuration map.
		 */
		regions_erase_type |= erase_type;

		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
			 region[i].size;
	}

	save_uniform_erase_type = map->uniform_erase_type;
	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
							  uniform_erase_type);

	if (!regions_erase_type) {
		/*
		 * Roll back to the previous uniform_erase_type mask: the SMPT
		 * is broken (no region advertises any erase type).
		 */
		map->uniform_erase_type = save_uniform_erase_type;
		return -EINVAL;
	}

	/*
	 * The BFPT advertises all the erase types supported by all possible
	 * map configurations. Mask out the erase types that the current map
	 * configuration does not support.
	 */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (!(regions_erase_type & BIT(erase[i].idx)))
			spi_nor_set_erase_type(&erase[i], 0, 0xFF);

	/*
	 * NOTE(review): at this point i == SNOR_ERASE_TYPE_MAX (from the loop
	 * above), not region_count, so this marks region[SNOR_ERASE_TYPE_MAX
	 * - 1] as the last region — confirm region_count always equals
	 * SNOR_ERASE_TYPE_MAX here, otherwise the wrong (or an out-of-bounds)
	 * region is marked.
	 */
	spi_nor_region_mark_end(&region[i - 1]);

	return 0;
}
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
/*
 * spi_nor_parse_smpt() - parse the Sector Map Parameter Table
 * @nor:		pointer to a 'struct spi_nor'
 * @smpt_header:	sector map parameter table header
 *
 * This table is optional; when present it describes the location and size of
 * the sectors within the main data array of the flash memory device and
 * which Erase Types each sector supports.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_smpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *smpt_header)
{
	const u32 *sector_map;
	u32 *smpt;
	size_t len;
	u32 addr;
	int i, ret;

	/* Read the whole Sector Map Parameter Table. */
	len = smpt_header->length * sizeof(*smpt);
	smpt = kmalloc(len, GFP_KERNEL);
	if (!smpt)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(smpt_header);
	ret = spi_nor_read_sfdp(nor, addr, len, smpt);
	if (ret)
		goto out;

	/* Fix endianness of the SMPT DWORDs. */
	for (i = 0; i < smpt_header->length; i++)
		smpt[i] = le32_to_cpu(smpt[i]);

	/* Probe the flash to find the map configuration in use. */
	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
	if (IS_ERR(sector_map)) {
		ret = PTR_ERR(sector_map);
		goto out;
	}

	ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
	if (ret)
		goto out;

	spi_nor_regions_sort_erase_types(&nor->erase_map);
	/* fall through */
out:
	kfree(smpt);
	return ret;
}
3244
#define SFDP_4BAIT_DWORD_MAX 2

/* Associates a DWORD1 "supported" bit of the 4BAIT with SPI NOR hwcaps. */
struct sfdp_4bait {
 /* The hardware capability. */
 u32 hwcaps;

 /*
  * The bit in DWORD1 of the 4BAIT that tells whether the 4-byte address
  * variant of the associated op code is supported.
  */
 u32 supported_bit;
};
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267static int spi_nor_parse_4bait(struct spi_nor *nor,
3268 const struct sfdp_parameter_header *param_header,
3269 struct spi_nor_flash_parameter *params)
3270{
3271 static const struct sfdp_4bait reads[] = {
3272 { SNOR_HWCAPS_READ, BIT(0) },
3273 { SNOR_HWCAPS_READ_FAST, BIT(1) },
3274 { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
3275 { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
3276 { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
3277 { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
3278 { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
3279 { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
3280 { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
3281 };
3282 static const struct sfdp_4bait programs[] = {
3283 { SNOR_HWCAPS_PP, BIT(6) },
3284 { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
3285 { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
3286 };
3287 static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
3288 { 0u , BIT(9) },
3289 { 0u , BIT(10) },
3290 { 0u , BIT(11) },
3291 { 0u , BIT(12) },
3292 };
3293 struct spi_nor_pp_command *params_pp = params->page_programs;
3294 struct spi_nor_erase_map *map = &nor->erase_map;
3295 struct spi_nor_erase_type *erase_type = map->erase_type;
3296 u32 *dwords;
3297 size_t len;
3298 u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
3299 int i, ret;
3300
3301 if (param_header->major != SFDP_JESD216_MAJOR ||
3302 param_header->length < SFDP_4BAIT_DWORD_MAX)
3303 return -EINVAL;
3304
3305
3306 len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
3307
3308
3309 dwords = kmalloc(len, GFP_KERNEL);
3310 if (!dwords)
3311 return -ENOMEM;
3312
3313 addr = SFDP_PARAM_HEADER_PTP(param_header);
3314 ret = spi_nor_read_sfdp(nor, addr, len, dwords);
3315 if (ret)
3316 return ret;
3317
3318
3319 for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
3320 dwords[i] = le32_to_cpu(dwords[i]);
3321
3322
3323
3324
3325
3326 discard_hwcaps = 0;
3327 read_hwcaps = 0;
3328 for (i = 0; i < ARRAY_SIZE(reads); i++) {
3329 const struct sfdp_4bait *read = &reads[i];
3330
3331 discard_hwcaps |= read->hwcaps;
3332 if ((params->hwcaps.mask & read->hwcaps) &&
3333 (dwords[0] & read->supported_bit))
3334 read_hwcaps |= read->hwcaps;
3335 }
3336
3337
3338
3339
3340
3341 pp_hwcaps = 0;
3342 for (i = 0; i < ARRAY_SIZE(programs); i++) {
3343 const struct sfdp_4bait *program = &programs[i];
3344
3345
3346
3347
3348
3349
3350
3351 discard_hwcaps |= program->hwcaps;
3352 if (dwords[0] & program->supported_bit)
3353 pp_hwcaps |= program->hwcaps;
3354 }
3355
3356
3357
3358
3359
3360 erase_mask = 0;
3361 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3362 const struct sfdp_4bait *erase = &erases[i];
3363
3364 if (dwords[0] & erase->supported_bit)
3365 erase_mask |= BIT(i);
3366 }
3367
3368
3369 erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
3370
3371
3372
3373
3374
3375
3376 if (!read_hwcaps || !pp_hwcaps || !erase_mask)
3377 goto out;
3378
3379
3380
3381
3382
3383 params->hwcaps.mask &= ~discard_hwcaps;
3384 params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
3385
3386
3387 for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
3388 struct spi_nor_read_command *read_cmd = ¶ms->reads[i];
3389
3390 read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
3391 }
3392
3393
3394 if (pp_hwcaps & SNOR_HWCAPS_PP)
3395 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP],
3396 SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
3397 if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
3398 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_1_4],
3399 SPINOR_OP_PP_1_1_4_4B,
3400 SNOR_PROTO_1_1_4);
3401 if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
3402 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_4_4],
3403 SPINOR_OP_PP_1_4_4_4B,
3404 SNOR_PROTO_1_4_4);
3405
3406 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3407 if (erase_mask & BIT(i))
3408 erase_type[i].opcode = (dwords[1] >>
3409 erase_type[i].idx * 8) & 0xFF;
3410 else
3411 spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
3412 }
3413
3414
3415
3416
3417
3418
3419
3420
3421 nor->addr_width = 4;
3422 nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
3423
3424
3425out:
3426 kfree(dwords);
3427 return ret;
3428}
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
/*
 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
 * @nor:		pointer to a 'struct spi_nor'
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled
 *
 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
 * specification. This function reads the SFDP header, locates the latest
 * revision of the mandatory Basic Flash Parameter Table, then parses the
 * supported optional tables (Sector Map, 4-byte Address Instruction Table).
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
{
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	struct device *dev = nor->dev;
	size_t psize;
	int i, err;

	/* Get the SFDP header. */
	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
	if (err < 0)
		return err;

	/* Check the SFDP signature and header major version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Allocate memory then read all parameter headers with a single
	 * Read SFDP command. These parameter headers will actually be parsed
	 * twice: a first time to get the latest revision of the basic flash
	 * parameter table, then a second time to handle the supported optional
	 * tables.
	 * Hence we read the parameter headers once for all to reduce the
	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
	 * because we don't need to keep these parameter headers: the allocated
	 * memory is always released with kfree() before exiting this function.
	 */
	if (header.nph) {
		psize = header.nph * sizeof(*param_headers);

		param_headers = kmalloc(psize, GFP_KERNEL);
		if (!param_headers)
			return -ENOMEM;

		err = spi_nor_read_sfdp(nor, sizeof(header),
					psize, param_headers);
		if (err < 0) {
			dev_err(dev, "failed to read SFDP parameter headers\n");
			goto exit;
		}
	}

	/*
	 * Check other parameter headers to get the latest revision of
	 * the basic flash parameter table.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	}

	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
	if (err)
		goto exit;

	/* Parse the optional parameter tables. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			err = spi_nor_parse_smpt(nor, param_header);
			break;

		case SFDP_4BAIT_ID:
			err = spi_nor_parse_4bait(nor, param_header, params);
			break;

		default:
			break;
		}

		if (err) {
			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
				 SFDP_PARAM_HEADER_ID(param_header));
			/*
			 * Let's not drop all information we extracted so far
			 * just because one optional parameter table is not
			 * supported yet.
			 */
			err = 0;
		}
	}

exit:
	kfree(param_headers);
	return err;
}
3552
/*
 * spi_nor_init_params() - initialize the flash parameters and settings
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to the 'struct spi_nor_flash_parameter' to be filled
 *
 * Fill @params with legacy defaults derived from the flash_info entry, then,
 * unless the flash_info opts out, try to refine them by parsing the SFDP
 * tables. On SFDP failure the legacy defaults (and the previous erase map)
 * are kept.
 *
 * Return: 0 on success (SFDP failures are not fatal), -errno otherwise.
 */
static int spi_nor_init_params(struct spi_nor *nor,
			       struct spi_nor_flash_parameter *params)
{
	struct spi_nor_erase_map *map = &nor->erase_map;
	const struct flash_info *info = nor->info;
	u8 i, erase_mask;

	/* Set legacy flash parameters as default. */
	memset(params, 0, sizeof(*params));

	/* Set SPI NOR sizes. */
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Fast Read: 8 dummy cycles. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);
	}

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	/*
	 * Sector Erase settings. Erase Types are placed in ascending size
	 * order, the smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);

	/* Select the procedure used to set the Quad Enable bit. */
	if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
				   SNOR_HWCAPS_PP_QUAD)) {
		switch (JEDEC_MFR(info)) {
		case SNOR_MFR_MACRONIX:
			params->quad_enable = macronix_quad_enable;
			break;

		case SNOR_MFR_ST:
		case SNOR_MFR_MICRON:
			/* ST/Micron: no quad_enable procedure is set. */
			break;

		default:
			/* Kept as the historical default for other vendors. */
			params->quad_enable = spansion_quad_enable;
			break;
		}

		/*
		 * A flash_info-specific quad_enable callback overrides the
		 * manufacturer-based default selected above.
		 */
		if (info->quad_enable)
			params->quad_enable = info->quad_enable;
	}

	if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
	    !(info->flags & SPI_NOR_SKIP_SFDP)) {
		struct spi_nor_flash_parameter sfdp_params;
		struct spi_nor_erase_map prev_map;

		/* Work on copies so a failed SFDP parse can be rolled back. */
		memcpy(&sfdp_params, params, sizeof(sfdp_params));
		memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));

		if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
			/* Undo side effects of a partial SFDP parse. */
			nor->addr_width = 0;
			nor->flags &= ~SNOR_F_4B_OPCODES;
			/* restore the previous erase map */
			memcpy(&nor->erase_map, &prev_map,
			       sizeof(nor->erase_map));
		} else {
			memcpy(params, &sfdp_params, sizeof(*params));
		}
	}

	return 0;
}
3670
3671static int spi_nor_select_read(struct spi_nor *nor,
3672 const struct spi_nor_flash_parameter *params,
3673 u32 shared_hwcaps)
3674{
3675 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
3676 const struct spi_nor_read_command *read;
3677
3678 if (best_match < 0)
3679 return -EINVAL;
3680
3681 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
3682 if (cmd < 0)
3683 return -EINVAL;
3684
3685 read = ¶ms->reads[cmd];
3686 nor->read_opcode = read->opcode;
3687 nor->read_proto = read->proto;
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
3700 return 0;
3701}
3702
3703static int spi_nor_select_pp(struct spi_nor *nor,
3704 const struct spi_nor_flash_parameter *params,
3705 u32 shared_hwcaps)
3706{
3707 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
3708 const struct spi_nor_pp_command *pp;
3709
3710 if (best_match < 0)
3711 return -EINVAL;
3712
3713 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
3714 if (cmd < 0)
3715 return -EINVAL;
3716
3717 pp = ¶ms->page_programs[cmd];
3718 nor->program_opcode = pp->opcode;
3719 nor->write_proto = pp->proto;
3720 return 0;
3721}
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735static const struct spi_nor_erase_type *
3736spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
3737 const u32 wanted_size)
3738{
3739 const struct spi_nor_erase_type *tested_erase, *erase = NULL;
3740 int i;
3741 u8 uniform_erase_type = map->uniform_erase_type;
3742
3743 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
3744 if (!(uniform_erase_type & BIT(i)))
3745 continue;
3746
3747 tested_erase = &map->erase_type[i];
3748
3749
3750
3751
3752
3753 if (tested_erase->size == wanted_size) {
3754 erase = tested_erase;
3755 break;
3756 }
3757
3758
3759
3760
3761
3762 if (!erase && tested_erase->size)
3763 erase = tested_erase;
3764
3765 }
3766
3767 if (!erase)
3768 return NULL;
3769
3770
3771 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
3772 map->uniform_erase_type |= BIT(erase - map->erase_type);
3773 return erase;
3774}
3775
/*
 * spi_nor_select_erase() - select the Sector Erase settings
 * @nor:		pointer to a 'struct spi_nor'
 * @wanted_size:	the erase size to prefer (the flash_info sector size)
 *
 * For a uniform erase map, pick one erase opcode/size for the whole flash;
 * for a non-uniform map, only mtd->erasesize is set (to the largest erase
 * size) and the per-region opcodes are resolved at erase time.
 *
 * Return: 0 on success, -EINVAL when no usable erase type exists.
 */
static int spi_nor_select_erase(struct spi_nor *nor, u32 wanted_size)
{
	struct spi_nor_erase_map *map = &nor->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	int i;

	/*
	 * The erase-handling code historically assumed a uniform layout with
	 * a single Sector Erase size; keep managing the flash as uniform
	 * whenever possible for backward compatibility.
	 */
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	wanted_size = 4096u;
#endif

	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map, wanted_size);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * For non-uniform SPI flash memory, set mtd->erasesize to the largest
	 * erase sector size. nor->erase_opcode is intentionally left unset.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}
3822
3823static int spi_nor_setup(struct spi_nor *nor,
3824 const struct spi_nor_flash_parameter *params,
3825 const struct spi_nor_hwcaps *hwcaps)
3826{
3827 u32 ignored_mask, shared_mask;
3828 bool enable_quad_io;
3829 int err;
3830
3831
3832
3833
3834
3835 shared_mask = hwcaps->mask & params->hwcaps.mask;
3836
3837
3838 ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
3839 SNOR_HWCAPS_READ_4_4_4 |
3840 SNOR_HWCAPS_READ_8_8_8 |
3841 SNOR_HWCAPS_PP_4_4_4 |
3842 SNOR_HWCAPS_PP_8_8_8);
3843 if (shared_mask & ignored_mask) {
3844 dev_dbg(nor->dev,
3845 "SPI n-n-n protocols are not supported yet.\n");
3846 shared_mask &= ~ignored_mask;
3847 }
3848
3849
3850 err = spi_nor_select_read(nor, params, shared_mask);
3851 if (err) {
3852 dev_err(nor->dev,
3853 "can't select read settings supported by both the SPI controller and memory.\n");
3854 return err;
3855 }
3856
3857
3858 err = spi_nor_select_pp(nor, params, shared_mask);
3859 if (err) {
3860 dev_err(nor->dev,
3861 "can't select write settings supported by both the SPI controller and memory.\n");
3862 return err;
3863 }
3864
3865
3866 err = spi_nor_select_erase(nor, nor->info->sector_size);
3867 if (err) {
3868 dev_err(nor->dev,
3869 "can't select erase settings supported by both the SPI controller and memory.\n");
3870 return err;
3871 }
3872
3873
3874 enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
3875 spi_nor_get_protocol_width(nor->write_proto) == 4);
3876 if (enable_quad_io && params->quad_enable)
3877 nor->quad_enable = params->quad_enable;
3878 else
3879 nor->quad_enable = NULL;
3880
3881 return 0;
3882}
3883
/*
 * spi_nor_init() - send the commands needed to bring the flash into service
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Clears the Status Register on parts known to need it, runs the Quad Enable
 * procedure if one was selected, and enters 4-byte address mode when needed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	/*
	 * Clear the Status Register for Atmel, Intel and SST parts, and any
	 * part flagged SPI_NOR_HAS_LOCK — presumably to release power-up
	 * block-protection bits so the flash is writable (NOTE(review):
	 * confirm intent against the relevant datasheets).
	 */
	if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
	    JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
	    JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
	    nor->info->flags & SPI_NOR_HAS_LOCK) {
		write_enable(nor);
		write_sr(nor, 0);
		spi_nor_wait_till_ready(nor);
	}

	/* Run the Quad Enable procedure selected by spi_nor_setup(). */
	if (nor->quad_enable) {
		err = nor->quad_enable(nor);
		if (err) {
			dev_err(nor->dev, "quad mode not supported\n");
			return err;
		}
	}

	/* Enter stateful 4-byte mode when 4-byte opcodes are unavailable. */
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		set_4byte(nor, true);
	}

	return 0;
}
3924
3925
3926static void spi_nor_resume(struct mtd_info *mtd)
3927{
3928 struct spi_nor *nor = mtd_to_spi_nor(mtd);
3929 struct device *dev = nor->dev;
3930 int ret;
3931
3932
3933 ret = spi_nor_init(nor);
3934 if (ret)
3935 dev_err(dev, "resume() failed\n");
3936}
3937
3938void spi_nor_restore(struct spi_nor *nor)
3939{
3940
3941 if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
3942 nor->flags & SNOR_F_BROKEN_RESET)
3943 set_4byte(nor, false);
3944}
3945EXPORT_SYMBOL_GPL(spi_nor_restore);
3946
3947static const struct flash_info *spi_nor_match_id(const char *name)
3948{
3949 const struct flash_info *id = spi_nor_ids;
3950
3951 while (id->name) {
3952 if (!strcmp(name, id->name))
3953 return id;
3954 id++;
3955 }
3956 return NULL;
3957}
3958
/*
 * spi_nor_scan() - probe and register a SPI NOR flash
 * @nor:	pointer to a 'struct spi_nor'
 * @name:	optional flash model name to match against the id table
 * @hwcaps:	the hardware capabilities supported by the SPI controller
 *
 * Identify the flash (by name and/or JEDEC ID), initialize its parameters
 * (possibly from SFDP), fill the mtd_info callbacks and flags, select the
 * read/write/erase settings and send the init commands to the device.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter params;
	const struct flash_info *info = NULL;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	if (name)
		info = spi_nor_match_id(name);
	/* Try to auto-detect if chip name wasn't specified or not found. */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return -ENOENT;

	/*
	 * If the caller specified a name for a flash model that can normally
	 * be detected using JEDEC, verify it against the chip.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return PTR_ERR(jinfo);
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite the platform ID.
			 * We can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	nor->info = info;

	mutex_init(&nor->lock);

	/*
	 * Set the XSR_RDY flag before any call to spi_nor_wait_till_ready():
	 * Xilinx S3AN parts share a manufacturer ID but use a different
	 * ready-status register.
	 */
	if (info->flags & SPI_S3AN)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	/* Initialize the flash parameters, possibly from SFDP. */
	ret = spi_nor_init_params(nor, &params);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = params.size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_resume = spi_nor_resume;

	/* NOR protection support for STmicro/Micron chips and similar. */
	if (JEDEC_MFR(info) == SNOR_MFR_ST ||
	    JEDEC_MFR(info) == SNOR_MFR_MICRON ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		nor->flash_lock = stm_lock;
		nor->flash_unlock = stm_unlock;
		nor->flash_is_locked = stm_is_locked;
	}

	/* Expose the lock callbacks through mtd only when all are present. */
	if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* SST parts flagged SST_WRITE use the AAI word program sequence. */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write;

	/* Translate flash_info flags into SNOR_F_* runtime flags. */
	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB)
		nor->flags |= SNOR_F_HAS_SR_TB;
	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = params.page_size;
	mtd->writebufsize = nor->page_size;

	if (np) {
		/* If we were instantiated by DT, use it. */
		if (of_property_read_bool(np, "m25p,fast-read"))
			params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
		else
			params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	} else {
		/* If we weren't instantiated by DT, default to fast-read. */
		params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
	}

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/* Some devices cannot do fast-read, no matter what DT tells us. */
	if (info->flags & SPI_NOR_NO_FR)
		params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - decide whether the Quad Enable bit procedure must run.
	 */
	ret = spi_nor_setup(nor, &params, hwcaps);
	if (ret)
		return ret;

	if (nor->addr_width) {
		/* already configured from SFDP */
	} else if (info->addr_width) {
		nor->addr_width = info->addr_width;
	} else if (mtd->size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_width = 4;
	} else {
		nor->addr_width = 3;
	}

	if (info->flags & SPI_NOR_4B_OPCODES ||
	    (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
		nor->flags |= SNOR_F_4B_OPCODES;

	/* 4BAIT parsing already converted the op codes; don't redo it. */
	if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
	    !(nor->flags & SNOR_F_HAS_4BAIT))
		spi_nor_set_4byte_opcodes(nor);

	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_err(dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	/* Xilinx S3AN parts need extra geometry handling. */
	if (info->flags & SPI_S3AN) {
		ret = s3an_nor_scan(nor);
		if (ret)
			return ret;
	}

	/* Send all the required SPI flash commands to initialize the device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
		 (long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
4163
4164MODULE_LICENSE("GPL v2");
4165MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
4166MODULE_AUTHOR("Mike Lavender");
4167MODULE_DESCRIPTION("framework for SPI NOR");
4168