1
2
3
4
5
6
7
8
9
10#include <linux/err.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/mutex.h>
15#include <linux/math64.h>
16#include <linux/sizes.h>
17#include <linux/slab.h>
18#include <linux/sort.h>
19
20#include <linux/mtd/mtd.h>
21#include <linux/of_platform.h>
22#include <linux/spi/flash.h>
23#include <linux/mtd/spi-nor.h>
24
25
26
27
28
29
30
/*
 * Timeout for everything but full-chip erase; probably could be much
 * smaller, but kept generous for safety.
 */
#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)

/*
 * Full-chip erase timeout, calibrated to a 2MB flash (M25P16); scaled up
 * linearly with the flash size by the caller.
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)

#define SPI_NOR_MAX_ID_LEN 6
#define SPI_NOR_MAX_ADDR_WIDTH 4
41
/* Describes one (Fast) Read command: clocking requirements, opcode, protocol. */
struct spi_nor_read_command {
	u8 num_mode_clocks;	/* mode clock cycles sent after the address */
	u8 num_wait_states;	/* dummy (wait-state) clock cycles */
	u8 opcode;
	enum spi_nor_protocol proto;
};
48
/* Describes one Page Program command: opcode and SPI protocol. */
struct spi_nor_pp_command {
	u8 opcode;
	enum spi_nor_protocol proto;
};
53
/* Indexes into spi_nor_flash_parameter.reads[], one per read protocol. */
enum spi_nor_read_command_index {
	SNOR_CMD_READ,
	SNOR_CMD_READ_FAST,
	SNOR_CMD_READ_1_1_1_DTR,

	/* Dual SPI */
	SNOR_CMD_READ_1_1_2,
	SNOR_CMD_READ_1_2_2,
	SNOR_CMD_READ_2_2_2,
	SNOR_CMD_READ_1_2_2_DTR,

	/* Quad SPI */
	SNOR_CMD_READ_1_1_4,
	SNOR_CMD_READ_1_4_4,
	SNOR_CMD_READ_4_4_4,
	SNOR_CMD_READ_1_4_4_DTR,

	/* Octal SPI */
	SNOR_CMD_READ_1_1_8,
	SNOR_CMD_READ_1_8_8,
	SNOR_CMD_READ_8_8_8,
	SNOR_CMD_READ_1_8_8_DTR,

	SNOR_CMD_READ_MAX
};
79
/* Indexes into spi_nor_flash_parameter.page_programs[], one per protocol. */
enum spi_nor_pp_command_index {
	SNOR_CMD_PP,

	/* Quad SPI */
	SNOR_CMD_PP_1_1_4,
	SNOR_CMD_PP_1_4_4,
	SNOR_CMD_PP_4_4_4,

	/* Octal SPI */
	SNOR_CMD_PP_1_1_8,
	SNOR_CMD_PP_1_8_8,
	SNOR_CMD_PP_8_8_8,

	SNOR_CMD_PP_MAX
};
95
/*
 * Flash parameters and settings, discovered at run time from static
 * tables and/or the SFDP tables: density, page size, the hardware
 * capabilities supported, the matching (Fast) Read / Page Program
 * commands, and the manufacturer-specific Quad Enable procedure.
 */
struct spi_nor_flash_parameter {
	u64 size;		/* flash density, in bytes */
	u32 page_size;		/* page program granularity */

	struct spi_nor_hwcaps hwcaps;
	struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
	struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];

	/* Sets the Quad Enable bit; may be NULL when not needed. */
	int (*quad_enable)(struct spi_nor *nor);
};
106
/*
 * SFDP (JESD216) parameter table header.
 * @length is the table length in DWORDs (32-bit words);
 * @parameter_table_pointer is a 24-bit byte address, LSB first.
 */
struct sfdp_parameter_header {
	u8 id_lsb;
	u8 minor;
	u8 major;
	u8 length; /* in double words */
	u8 parameter_table_pointer[3]; /* byte address */
	u8 id_msb;
};
115
/* Rebuild the 16-bit table ID and the 24-bit table pointer from the header. */
#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
#define SFDP_PARAM_HEADER_PTP(p) \
	(((p)->parameter_table_pointer[2] << 16) | \
	 ((p)->parameter_table_pointer[1] << 8) | \
	 ((p)->parameter_table_pointer[0] << 0))

#define SFDP_BFPT_ID 0xff00	/* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID 0xff81	/* Sector Map Table */
#define SFDP_4BAIT_ID 0xff84	/* 4-byte Address Instruction Table */

#define SFDP_SIGNATURE 0x50444653U	/* "SFDP", little-endian */
#define SFDP_JESD216_MAJOR 1
#define SFDP_JESD216_MINOR 0
#define SFDP_JESD216A_MINOR 5
#define SFDP_JESD216B_MINOR 6
131
/* SFDP header, located at byte address 0 of the SFDP area. */
struct sfdp_header {
	u32 signature; /* 0x50444653U <=> "SFDP" */
	u8 minor;
	u8 major;
	u8 nph; /* 0-based number of parameter headers */
	u8 unused;

	/* Basic Flash Parameter Table, always the first table. */
	struct sfdp_parameter_header bfpt_header;
};
142
143
144
145
146
147
148
/* Basic Flash Parameter Table */

/*
 * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
 * They are indexed from 1 but C arrays are indexed from 0.
 */
#define BFPT_DWORD(i) ((i) - 1)
#define BFPT_DWORD_MAX 16

/* The first version of JESD216 defined only 9 DWORDs. */
#define BFPT_DWORD_MAX_JESD216 9

/* 1st DWORD: fast read support and address byte capabilities. */
#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
#define BFPT_DWORD1_DTR BIT(19)
#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)

/* 5th DWORD: 2-2-2 and 4-4-4 fast read support. */
#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)

/* 11th DWORD: page size, encoded as a power of two. */
#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)

/*
 * 15th DWORD: Quad Enable Requirements (QER), from JESD216 rev B:
 * - 000b: no QE bit; device detects 1-1-4 and 1-4-4 reads by instruction.
 * - 001b: QE is bit 1 of SR2, set via Write Status with two data bytes;
 *         writing only one byte clears SR2 (including QE) — "buggy" variant.
 * - 010b: QE is bit 6 of SR1, set via Write Status with one data byte.
 * - 011b: QE is bit 7 of SR2, set via Write SR2 instruction 3Eh.
 * - 100b: QE is bit 1 of SR2, set via Write Status with two data bytes;
 *         SR2 cannot be read back (no 35h).
 * - 101b: QE is bit 1 of SR2; SR2 is readable with instruction 35h.
 */
#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
214
/* Basic Flash Parameter Table, raw DWORDs as read from the SFDP area. */
struct sfdp_bfpt {
	u32 dwords[BFPT_DWORD_MAX];
};
218
219
220
221
222
223
224
225
/*
 * struct spi_nor_fixups - manufacturer/chip-specific fixup hooks.
 * @post_bfpt: called after the BFPT table has been parsed; lets a flash
 *	tweak or override parameters extracted from SFDP.
 */
struct spi_nor_fixups {
	int (*post_bfpt)(struct spi_nor *nor,
			 const struct sfdp_parameter_header *bfpt_header,
			 const struct sfdp_bfpt *bfpt,
			 struct spi_nor_flash_parameter *params);
};
232
/* Static description of one supported flash chip. */
struct flash_info {
	char *name;

	/*
	 * This array stores the ID bytes.
	 * The first three bytes are the JEDEC ID.
	 * JEDEC ID zero means "no ID" (mostly older chips).
	 */
	u8 id[SPI_NOR_MAX_ID_LEN];
	u8 id_len;

	/*
	 * The size listed here is what works with SPINOR_OP_SE, which isn't
	 * necessarily called a "sector" by the vendor.
	 */
	unsigned sector_size;
	u16 n_sectors;

	u16 page_size;
	u16 addr_width;

	u16 flags;
#define SECT_4K BIT(0)	/* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1)	/* No erase command needed */
#define SST_WRITE BIT(2)	/* use SST byte programming */
#define SPI_NOR_NO_FR BIT(3)	/* Can't do fastread */
#define SECT_4K_PMC BIT(4)	/* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ BIT(5)	/* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ BIT(6)	/* Flash supports Quad Read */
#define USE_FSR BIT(7)	/* use flag status register */
#define SPI_NOR_HAS_LOCK BIT(8)	/* flash supports lock/unlock via SR */
#define SPI_NOR_HAS_TB BIT(9)
	/*
	 * Flash SR has a Top/Bottom (TB) protect bit;
	 * must be used together with SPI_NOR_HAS_LOCK.
	 */
#define SPI_S3AN BIT(10)
	/*
	 * Xilinx Spartan 3AN In-System Flash (the MFR byte cannot be used
	 * for probing because it matches Atmel's).
	 */
#define SPI_NOR_4B_OPCODES BIT(11)
	/*
	 * Use dedicated 4-byte-address opcodes to support memory sizes
	 * above 128Mib.
	 */
#define NO_CHIP_ERASE BIT(12)	/* chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13)	/* skip parsing of SFDP tables */
#define USE_CLSR BIT(14)	/* use CLSR command (Spansion) */
#define SPI_NOR_OCTAL_READ BIT(15)	/* flash supports Octal Read */

	/* Part-specific fixup hooks. */
	const struct spi_nor_fixups *fixups;

	int (*quad_enable)(struct spi_nor *nor);
};
288
289#define JEDEC_MFR(info) ((info)->id[0])
290
291
292
293
294
295
296static int read_sr(struct spi_nor *nor)
297{
298 int ret;
299 u8 val;
300
301 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
302 if (ret < 0) {
303 pr_err("error %d reading SR\n", (int) ret);
304 return ret;
305 }
306
307 return val;
308}
309
310
311
312
313
314
315static int read_fsr(struct spi_nor *nor)
316{
317 int ret;
318 u8 val;
319
320 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
321 if (ret < 0) {
322 pr_err("error %d reading FSR\n", ret);
323 return ret;
324 }
325
326 return val;
327}
328
329
330
331
332
333
/*
 * Read the Configuration Register (Spansion 35h instruction).
 * Returns the register value, or negative on error.
 */
static int read_cr(struct spi_nor *nor)
{
	int ret;
	u8 val;

	ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
	if (ret < 0) {
		dev_err(nor->dev, "error %d reading CR\n", ret);
		return ret;
	}

	return val;
}
347
348
349
350
351
/*
 * Write the Status Register (1 byte).
 * Returns negative if an error occurred.
 */
static int write_sr(struct spi_nor *nor, u8 val)
{
	nor->cmd_buf[0] = val;
	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
}
357
358
359
360
361
/*
 * Set the Write Enable Latch with the Write Enable command.
 * Returns negative if an error occurred.
 */
static int write_enable(struct spi_nor *nor)
{
	return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
}
366
367
368
369
/* Send the Write Disable instruction to the chip. */
static int write_disable(struct spi_nor *nor)
{
	return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
}
374
375static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
376{
377 return mtd->priv;
378}
379
380
381static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
382{
383 size_t i;
384
385 for (i = 0; i < size; i++)
386 if (table[i][0] == opcode)
387 return table[i][1];
388
389
390 return opcode;
391}
392
/* Map a 3-byte-address read opcode to its dedicated 4-byte-address variant. */
static u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ, SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },

		/* DTR read opcodes */
		{ SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
413
/* Map a 3-byte-address program opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP, SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
427
/* Map a 3-byte-address erase opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE, SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
439
/* Switch the read/program/erase opcodes to their 4-byte-address variants. */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	/* Do some manufacturer fixups first */
	switch (JEDEC_MFR(nor->info)) {
	case SNOR_MFR_SPANSION:
		/* No small sector erase for 4-byte command set */
		nor->erase_opcode = SPINOR_OP_SE;
		nor->mtd.erasesize = nor->info->sector_size;
		break;

	default:
		break;
	}

	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	/* Non-uniform erase maps carry their own per-type opcodes. */
	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}
470
471
/* Enable/disable 4-byte addressing mode. */
static int set_4byte(struct spi_nor *nor, bool enable)
{
	int status;
	bool need_wren = false;
	u8 cmd;

	switch (JEDEC_MFR(nor->info)) {
	case SNOR_MFR_ST:
	case SNOR_MFR_MICRON:
		/* Some Micron need WREN command; all will accept it */
		need_wren = true;
		/* fall through */
	case SNOR_MFR_MACRONIX:
	case SNOR_MFR_WINBOND:
		if (need_wren)
			write_enable(nor);

		cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
		status = nor->write_reg(nor, cmd, NULL, 0);
		if (need_wren)
			write_disable(nor);

		if (!status && !enable &&
		    JEDEC_MFR(nor->info) == SNOR_MFR_WINBOND) {
			/*
			 * On Winbond W25Q256FV, leaving 4-byte mode causes
			 * the Extended Address Register to be set to 1, so
			 * all 3-byte-address reads come from the second 16M.
			 * Clear the register to restore normal behavior.
			 */
			write_enable(nor);
			nor->cmd_buf[0] = 0;
			nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
			write_disable(nor);
		}

		return status;
	default:
		/* Spansion style: Bank Register Write. */
		nor->cmd_buf[0] = enable << 7;
		return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
	}
}
515
516static int s3an_sr_ready(struct spi_nor *nor)
517{
518 int ret;
519 u8 val;
520
521 ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
522 if (ret < 0) {
523 dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
524 return ret;
525 }
526
527 return !!(val & XSR_RDY);
528}
529
/* Check the Status Register: 1 = ready, 0 = busy, negative = error. */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int sr = read_sr(nor);
	if (sr < 0)
		return sr;

	/* Spansion-style parts latch erase/program errors in the SR. */
	if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
		if (sr & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		/* Clear the latched error so the next operation can run. */
		nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
		return -EIO;
	}

	return !(sr & SR_WIP);
}
548
/* Check the Flag Status Register: non-zero = ready, 0 = busy, negative = error. */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int fsr = read_fsr(nor);
	if (fsr < 0)
		return fsr;

	if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
		if (fsr & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (fsr & FSR_PT_ERR)
			dev_err(nor->dev,
			"Attempted to modify a protected sector.\n");

		/* Clear the latched error flags before reporting failure. */
		nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
		return -EIO;
	}

	return fsr & FSR_READY;
}
571
/*
 * Combined readiness check: consult the (chip-appropriate) status
 * register, and the Flag Status Register too when the part uses one.
 * Returns 1 when ready, 0 when busy, negative on error.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int sr, fsr;

	if (nor->flags & SNOR_F_READY_XSR_RDY)
		sr = s3an_sr_ready(nor);
	else
		sr = spi_nor_sr_ready(nor);
	if (sr < 0)
		return sr;
	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
	if (fsr < 0)
		return fsr;
	return sr && fsr;
}
587
588
589
590
591
/*
 * Service routine to poll the status register until ready, or timeout
 * occurs. Returns non-zero if error.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		/*
		 * Flag first, poll after: one final status check is still
		 * performed after the deadline passes.
		 */
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_err(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}
617
/* Wait for the flash to become ready using the default timeout. */
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}
623
624
625
626
627
628
/*
 * Erase the whole flash memory.
 * Returns 0 if successful, non-zero otherwise.
 */
static int erase_chip(struct spi_nor *nor)
{
	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

	return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
}
635
636static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
637{
638 int ret = 0;
639
640 mutex_lock(&nor->lock);
641
642 if (nor->prepare) {
643 ret = nor->prepare(nor, ops);
644 if (ret) {
645 dev_err(nor->dev, "failed in the preparation.\n");
646 mutex_unlock(&nor->lock);
647 return ret;
648 }
649 }
650 return ret;
651}
652
/* Run the controller's optional unprepare() hook and drop the nor lock. */
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	if (nor->unprepare)
		nor->unprepare(nor, ops);
	mutex_unlock(&nor->lock);
}
659
660
661
662
663
664
665
666
667
668
669static loff_t spi_nor_s3an_addr_convert(struct spi_nor *nor, unsigned int addr)
670{
671 unsigned int offset;
672 unsigned int page;
673
674 offset = addr % nor->page_size;
675 page = addr / nor->page_size;
676 page <<= (nor->page_size > 512) ? 10 : 9;
677
678 return page | offset;
679}
680
681
682
683
/*
 * Initiate the erasure of a single sector.
 * Returns 0 if successful, non-zero otherwise.
 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
	int i;

	if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
		addr = spi_nor_s3an_addr_convert(nor, addr);

	if (nor->erase)
		return nor->erase(nor, addr);

	/*
	 * Default implementation, if the driver doesn't have a specialized
	 * HW control: serialize the address big-endian into buf[].
	 */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		buf[i] = addr & 0xff;
		addr >>= 8;
	}

	return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
}
706
707
708
709
710
711
712
713
714
715static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
716 u64 dividend, u32 *remainder)
717{
718
719 *remainder = (u32)dividend & erase->size_mask;
720 return dividend >> erase->size_shift;
721}
722
723
724
725
726
727
728
729
730
731
732
733
734
735
/*
 * Find the best erase type for the given offset in the erase region and
 * for the length of the address range to erase: returns the biggest
 * supported erase type that fits the length and whose start address is
 * aligned, or NULL if none matches.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0; iterate from the biggest down.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		/* Alignment is not mandatory for overlaid regions */
		if (region->offset & SNOR_OVERLAID_REGION)
			return erase;

		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (rem)
			continue;
		else
			return erase;
	}

	return NULL;
}
774
775
776
777
778
779
780
781static struct spi_nor_erase_region *
782spi_nor_region_next(struct spi_nor_erase_region *region)
783{
784 if (spi_nor_region_is_last(region))
785 return NULL;
786 region++;
787 return region;
788}
789
790
791
792
793
794
795
796
797
798
/*
 * Walk the erase map and return the region containing @addr, or
 * ERR_PTR(-EINVAL) when the address lies outside every region.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	/* Strip the flag bits stored in the low bits of ->offset. */
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}
817
818
819
820
821
822
823
824
825
/*
 * Allocate and initialize an erase command for one erase type within one
 * region. Returns the command, or ERR_PTR(-ENOMEM) on allocation failure.
 * The caller owns the returned command and frees it with kfree().
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	/* An overlaid region is always erased in one shot of its full size. */
	if (region->offset & SNOR_OVERLAID_REGION)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}
847
848
849
850
851
/* Free every erase command queued on @erase_list. */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}
861
862
863
864
865
866
867
868
869
870
871
872
873
874
/*
 * Build the list of best-fit erase commands covering [@addr, @addr + @len)
 * on a flash with a non-uniform erase map. Consecutive sectors erased with
 * the same erase type are merged into one command by bumping its count.
 *
 * Returns 0 on success (commands appended to @erase_list), a negative
 * error code otherwise; on failure the partially-built list is destroyed.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/*
		 * A new command is needed whenever the erase type changes or
		 * the region is overlaid (erased in a single full-size shot).
		 */
		if (prev_erase != erase ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Advance to the next region when this one is consumed. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
929
930
931
932
933
934
935
936
937
938
939
940
/*
 * Erase [@addr, @addr + @len) on a flash with a non-uniform erase map by
 * building and executing a list of best-fit erase commands.
 * Returns 0 on success, a negative error code otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		/* spi_nor_erase_sector() issues whatever is in erase_opcode. */
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
977
978
979
980
981
/*
 * Erase an address range on the nor chip. The address range may extend
 * over one or more erase sectors. Returns an error if there is a problem
 * erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Uniform-erase flashes only accept whole multiples of erasesize. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		write_enable(nor);

		if (erase_chip(nor)) {
			ret = -EIO;
			goto erase_err;
		}

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/*
	 * REVISIT: in some cases we could speed up erasing large regions by
	 * using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}

	/* erase multiple sectors using the non-uniform erase map */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	/* NOTE(review): write_disable()'s return value is ignored here. */
	write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);

	return ret;
}
1065
1066
/*
 * Write the Status Register, wait for completion, then read it back and
 * verify that the bits covered by @mask took the value in @status_new.
 * Returns 0 on success, -EIO on verification mismatch, -errno otherwise.
 */
static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
{
	int ret;

	write_enable(nor);
	ret = write_sr(nor, status_new);
	if (ret)
		return ret;

	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		return ret;

	ret = read_sr(nor);
	if (ret < 0)
		return ret;

	return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
}
1086
/*
 * Decode the range currently locked by the BP{0,1,2} (and optionally TB)
 * bits of status register @sr into an (*ofs, *len) pair. The protected
 * length halves for each step the BP value drops below its maximum.
 */
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
				 uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	int shift = ffs(mask) - 1;
	int pow;

	if (!(sr & mask)) {
		/* No protection */
		*ofs = 0;
		*len = 0;
	} else {
		pow = ((sr & mask) ^ mask) >> shift;
		*len = mtd->size >> pow;
		/* TB set means the protected area starts at the bottom. */
		if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
			*ofs = 0;
		else
			*ofs = mtd->size - *len;
	}
}
1108
1109
1110
1111
1112
/*
 * Return 1 if the entire region is locked (if @locked is true) or unlocked
 * (if @locked is false), 0 otherwise.
 */
static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				    u8 sr, bool locked)
{
	loff_t lock_offs;
	uint64_t lock_len;

	if (!len)
		return 1;

	stm_get_locked_range(nor, sr, &lock_offs, &lock_len);

	if (locked)
		/* Requested range is a sub-range of locked range */
		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
	else
		/* Requested range does not overlap with locked range */
		return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
}
1131
/* Return 1 if [ofs, ofs+len) is entirely locked according to @sr. */
static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			    u8 sr)
{
	return stm_check_lock_status_sr(nor, ofs, len, sr, true);
}
1137
/* Return 1 if [ofs, ofs+len) is entirely unlocked according to @sr. */
static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
			      u8 sr)
{
	return stm_check_lock_status_sr(nor, ofs, len, sr, false);
}
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
/*
 * Lock a region of the flash. Compatible with ST Micro and similar flash.
 * Supports the block protection bits BP{0,1,2} in the status register (SR).
 * Does not support the SEC (sector protect) or CMP (complement protect)
 * bits found in some newer SR bitfields. TB (top/bottom protect) is
 * supported conditionally (SNOR_F_HAS_SR_TB).
 *
 * The protected region grows toward one end of the chip and doubles with
 * each BP increment, so only power-of-two boundaries can be locked.
 * Returns negative on errors, 0 on success.
 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is unlocked, we don't need to do anything */
	if (stm_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection */
	if (!stm_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection */
	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
			      status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	/*
	 * Need smallest pow such that:
	 *   1 / (2^pow) <= (len / size)
	 * so (assuming power-of-2 size) we do:
	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
	 * and the BP field value is (mask - pow bits).
	 */
	pow = ilog2(mtd->size) - ilog2(lock_len);
	val = mask - (pow << shift);
	if (val & ~mask)
		return -EINVAL;

	/* Don't "lock" with no region! */
	if (!(val & mask))
		return -EINVAL;

	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Disallow further writes if WP pin is asserted */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	return write_sr_and_check(nor, status_new, mask);
}
1250
1251
1252
1253
1254
1255
/*
 * Unlock a region of the flash. See stm_lock() for more info.
 * Returns negative on errors, 0 on success.
 */
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is locked, we don't need to do anything */
	if (stm_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection */
	if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection */
	if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should remain locked */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	/*
	 * Need largest pow such that:
	 *   1 / (2^pow) >= (len / size)
	 * so (assuming power-of-2 size) we do:
	 *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
	 *
	 * NOTE(review): pow is computed before the lock_len == 0 test, so
	 * order_base_2(0) is evaluated in that case; presumably benign since
	 * val is then forced to 0 — confirm order_base_2(0) semantics.
	 */
	pow = ilog2(mtd->size) - order_base_2(lock_len);
	if (lock_len == 0) {
		val = 0; /* fully unlocked */
	} else {
		val = mask - (pow << shift);

		/* Some power-of-two sizes are not supported */
		if (val & ~mask)
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Don't protect status register if we're fully unlocked */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	return write_sr_and_check(nor, status_new, mask);
}
1333
1334
1335
1336
1337
1338
1339
1340
1341static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1342{
1343 int status;
1344
1345 status = read_sr(nor);
1346 if (status < 0)
1347 return status;
1348
1349 return stm_is_locked_sr(nor, ofs, len, status);
1350}
1351
/* mtd_lock() entry point: delegates to the flash-specific lock handler. */
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
	if (ret)
		return ret;

	ret = nor->flash_lock(nor, ofs, len);

	/*
	 * NOTE(review): unprep is passed SPI_NOR_OPS_UNLOCK although prep
	 * used SPI_NOR_OPS_LOCK — presumably harmless if drivers ignore
	 * the ops argument; confirm against prepare()/unprepare() users.
	 */
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
	return ret;
}
1366
/* mtd_unlock() entry point: delegates to the flash-specific unlock handler. */
static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
	if (ret)
		return ret;

	ret = nor->flash_unlock(nor, ofs, len);

	/* NOTE(review): ops constant differs from prep's — see spi_nor_lock. */
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
	return ret;
}
1381
/* mtd_is_locked() entry point: delegates to the flash-specific handler. */
static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
	if (ret)
		return ret;

	ret = nor->flash_is_locked(nor, ofs, len);

	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
	return ret;
}
1396
1397
1398
1399
1400
1401
1402
/*
 * Write the Status and Configuration Registers with one 16-bit Write
 * Status (01h) command: sr_cr[0] goes to the Status Register, sr_cr[1]
 * to the Configuration Register. Returns negative if an error occurred.
 */
static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
{
	int ret;

	write_enable(nor);

	ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
	if (ret < 0) {
		dev_err(nor->dev,
			"error while writing configuration register\n");
		return -EINVAL;
	}

	ret = spi_nor_wait_till_ready(nor);
	if (ret) {
		dev_err(nor->dev,
			"timeout while writing configuration register\n");
		return ret;
	}

	return 0;
}
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436static int macronix_quad_enable(struct spi_nor *nor)
1437{
1438 int ret, val;
1439
1440 val = read_sr(nor);
1441 if (val < 0)
1442 return val;
1443 if (val & SR_QUAD_EN_MX)
1444 return 0;
1445
1446 write_enable(nor);
1447
1448 write_sr(nor, val | SR_QUAD_EN_MX);
1449
1450 ret = spi_nor_wait_till_ready(nor);
1451 if (ret)
1452 return ret;
1453
1454 ret = read_sr(nor);
1455 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1456 dev_err(nor->dev, "Macronix Quad bit not set\n");
1457 return -EINVAL;
1458 }
1459
1460 return 0;
1461}
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486static int spansion_quad_enable(struct spi_nor *nor)
1487{
1488 u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN};
1489 int ret;
1490
1491 ret = write_sr_cr(nor, sr_cr);
1492 if (ret)
1493 return ret;
1494
1495
1496 ret = read_cr(nor);
1497 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1498 dev_err(nor->dev, "Spansion Quad bit not set\n");
1499 return -EINVAL;
1500 }
1501
1502 return 0;
1503}
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
/**
 * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Configuration Register.
 * This function should be used with QSPI memories not supporting the Read
 * Configuration Register (35h) instruction, so the result cannot be read
 * back and verified.
 *
 * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
 * memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
{
	u8 sr_cr[2];
	int ret;

	/* Keep the current value of the Status Register. */
	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(nor->dev, "error while reading status register\n");
		return -EINVAL;
	}
	sr_cr[0] = ret;
	sr_cr[1] = CR_QUAD_EN_SPAN;

	return write_sr_cr(nor, sr_cr);
}
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
/**
 * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Configuration Register.
 * This function should be used with QSPI memories supporting the Read
 * Configuration Register (35h) instruction.
 *
 * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
 * memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_read_cr_quad_enable(struct spi_nor *nor)
{
	struct device *dev = nor->dev;
	u8 sr_cr[2];
	int ret;

	/* Check current Quad Enable bit value. */
	ret = read_cr(nor);
	if (ret < 0) {
		dev_err(dev, "error while reading configuration register\n");
		return -EINVAL;
	}

	if (ret & CR_QUAD_EN_SPAN)
		return 0;

	sr_cr[1] = ret | CR_QUAD_EN_SPAN;

	/* Keep the current value of the Status Register. */
	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(dev, "error while reading status register\n");
		return -EINVAL;
	}
	sr_cr[0] = ret;

	ret = write_sr_cr(nor, sr_cr);
	if (ret)
		return ret;

	/* Read back and check it. */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600static int sr2_bit7_quad_enable(struct spi_nor *nor)
1601{
1602 u8 sr2;
1603 int ret;
1604
1605
1606 ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
1607 if (ret)
1608 return ret;
1609 if (sr2 & SR2_QUAD_EN_BIT7)
1610 return 0;
1611
1612
1613 sr2 |= SR2_QUAD_EN_BIT7;
1614
1615 write_enable(nor);
1616
1617 ret = nor->write_reg(nor, SPINOR_OP_WRSR2, &sr2, 1);
1618 if (ret < 0) {
1619 dev_err(nor->dev, "error while writing status register 2\n");
1620 return -EINVAL;
1621 }
1622
1623 ret = spi_nor_wait_till_ready(nor);
1624 if (ret < 0) {
1625 dev_err(nor->dev, "timeout while writing status register 2\n");
1626 return ret;
1627 }
1628
1629
1630 ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
1631 if (!(ret > 0 && (sr2 & SR2_QUAD_EN_BIT7))) {
1632 dev_err(nor->dev, "SR2 Quad bit not set\n");
1633 return -EINVAL;
1634 }
1635
1636 return 0;
1637}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
/**
 * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Read-modify-write that clears the Block Protection (BP0-2) bits from the
 * Status Register without affecting other bits.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_clear_sr_bp(struct spi_nor *nor)
{
	int ret;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;

	ret = read_sr(nor);
	if (ret < 0) {
		dev_err(nor->dev, "error while reading status register\n");
		return ret;
	}

	write_enable(nor);

	ret = write_sr(nor, ret & ~mask);
	if (ret) {
		dev_err(nor->dev, "write to status register failed\n");
		return ret;
	}

	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		dev_err(nor->dev, "timeout while writing status register\n");
	return ret;
}
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
/**
 * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
 * bits on Spansion flashes.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Read-modify-write that clears the Block Protection bits from the Status
 * Register without affecting other bits. On these flashes a plain one-byte
 * Write Status would clear the Configuration Register (including the QE
 * bit), so when QE is set the 16-bit write form is used to preserve it.
 * Assumes the 16-bit Write Register and Read Configuration Register (35h)
 * instructions are supported.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
{
	int ret;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 sr_cr[2] = {0};

	/* Check current Quad Enable bit value. */
	ret = read_cr(nor);
	if (ret < 0) {
		dev_err(nor->dev,
			"error while reading configuration register\n");
		return ret;
	}

	/*
	 * When the configuration register Quad Enable bit is one, only the
	 * Write Status (01h) command with two data bytes may be used.
	 */
	if (ret & CR_QUAD_EN_SPAN) {
		sr_cr[1] = ret;

		ret = read_sr(nor);
		if (ret < 0) {
			dev_err(nor->dev,
				"error while reading status register\n");
			return ret;
		}
		sr_cr[0] = ret & ~mask;

		ret = write_sr_cr(nor, sr_cr);
		if (ret)
			dev_err(nor->dev, "16-bit write register failed\n");
		return ret;
	}

	/*
	 * When the configuration register Quad Enable bit is zero, use the
	 * Write Status (01h) command with one data byte.
	 */
	return spi_nor_clear_sr_bp(nor);
}
1727
1728
/* Used when the "_ext_id" is two bytes at most. */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff, \
		((_ext_id) >> 8) & 0xff, \
		(_ext_id) & 0xff, \
		}, \
	.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = 256, \
	.flags = (_flags),

/* Used when the "_ext_id" is three bytes: the full 6-byte ID is matched. */
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff, \
		((_ext_id) >> 16) & 0xff, \
		((_ext_id) >> 8) & 0xff, \
		(_ext_id) & 0xff, \
		}, \
	.id_len = 6, \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = 256, \
	.flags = (_flags),

/* Used for non-JEDEC Catalyst/Everspin parts without an ID to match. */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
	.sector_size = (_sector_size), \
	.n_sectors = (_n_sectors), \
	.page_size = (_page_size), \
	.addr_width = (_addr_width), \
	.flags = (_flags),

/* Used for Xilinx S3AN in-system-flash parts (8 pages per sector). */
#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
	.id = { \
		((_jedec_id) >> 16) & 0xff, \
		((_jedec_id) >> 8) & 0xff, \
		(_jedec_id) & 0xff \
		}, \
	.id_len = 3, \
	.sector_size = (8*_page_size), \
	.n_sectors = (_n_sectors), \
	.page_size = _page_size, \
	.addr_width = 3, \
	.flags = SPI_NOR_NO_FR | SPI_S3AN,
1777
static int
is25lp256_post_bfpt_fixups(struct spi_nor *nor,
			   const struct sfdp_parameter_header *bfpt_header,
			   const struct sfdp_bfpt *bfpt,
			   struct spi_nor_flash_parameter *params)
{
	/*
	 * The flash entry declares SPI_NOR_4B_OPCODES, but its BFPT
	 * advertises "3-Byte only addressing".  Override the address width
	 * advertised by the BFPT so the 4-byte opcodes can actually be used.
	 */
	if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
		BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
		nor->addr_width = 4;

	return 0;
}

static struct spi_nor_fixups is25lp256_fixups = {
	.post_bfpt = is25lp256_post_bfpt_fixups,
};
1799
static int
mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
			    const struct sfdp_parameter_header *bfpt_header,
			    const struct sfdp_bfpt *bfpt,
			    struct spi_nor_flash_parameter *params)
{
	/*
	 * MX25L25635F supports 4-byte opcodes but MX25L25635E does not, and
	 * both parts share the same JEDEC ID, so they cannot be told apart
	 * by a flash_info table entry.
	 *
	 * NOTE(review): the distinguishing feature used here is that the F
	 * variant advertises Fast Read 4-4-4 support in BFPT DWORD5, while
	 * the E variant does not — confirm against the Macronix datasheets.
	 */
	if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
		nor->flags |= SNOR_F_4B_OPCODES;

	return 0;
}

static struct spi_nor_fixups mx25l25635_fixups = {
	.post_bfpt = mx25l25635_post_bfpt_fixups,
};
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
/*
 * Table of known SPI NOR parts, matched by JEDEC ID (see spi_nor_read_id()).
 * Entries with id_len == 0 are non-JEDEC parts selected by name only.
 */
static const struct flash_info spi_nor_ids[] = {
	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
	{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
	{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },

	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
	{ "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
	{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },

	{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
	{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },

	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },

	/* EON -- en25xxx */
	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
	{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
	{ "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
	{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
	{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },

	/* ESMT */
	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
	{ "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
	{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Fujitsu */
	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },

	/* GigaDevice */
	{
		"gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
		SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
		SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
		SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
		SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
		SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
		SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
		SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
		SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
		SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
		SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
		SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
		SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
		SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
		SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
		.quad_enable = macronix_quad_enable,
	},

	/* Intel/Numonyx -- xxxs33b */
	{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
	{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
	{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },

	/* ISSI */
	{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
	{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ) },
	{ "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_4B_OPCODES)
		.fixups = &is25lp256_fixups },
	{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* Macronix */
	{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
	{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
	{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
	{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
	{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
	{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
	{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
	{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
	{ "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
	{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
	{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
	{ "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
		.fixups = &mx25l25635_fixups },
	{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
	{ "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
			 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },

	/* Micron <--> ST Micro */
	{ "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
	{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
	{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
	{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
	{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
	{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
	{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
			SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
			NO_CHIP_ERASE) },
	{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },

	/* Micron */
	{
		"mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
			SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
			SPI_NOR_4B_OPCODES)
	},

	/* PMC */
	{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
	{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
	{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },

	/*
	 * Spansion/Cypress -- single (large) sector size only, at least
	 * for the chips listed here (without boot sectors).
	 */
	{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
	{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
			SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | USE_CLSR) },
	{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
	{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
	{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
	{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
	{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
	{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
	{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
	{ "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{ "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
	{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
	{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
	{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
	{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
	{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },

	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
	{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
	{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
	{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
	{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
	{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
	{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },

	/* ST Microelectronics -- newer production may have feature updates */
	{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
	{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
	{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
	{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
	{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
	{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
	{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
	{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
	{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },

	{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
	{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
	{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
	{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
	{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
	{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
	{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
	{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },

	{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
	{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
	{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },

	{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
	{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
	{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },

	{ "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
	{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
	{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },

	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
	{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
	{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
	{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
	{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
	{
		"w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
	{
		"w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
	{
		"w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{
		"w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{
		"w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
	},
	{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
			SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },

	/* Catalyst / On Semiconductor -- non-JEDEC */
	{ "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Xilinx S3AN Internal Flash */
	{ "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
	{ "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
	{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
	{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
	{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },

	/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ },	/* sentinel */
};
2176
2177static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2178{
2179 int tmp;
2180 u8 id[SPI_NOR_MAX_ID_LEN];
2181 const struct flash_info *info;
2182
2183 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
2184 if (tmp < 0) {
2185 dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
2186 return ERR_PTR(tmp);
2187 }
2188
2189 for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
2190 info = &spi_nor_ids[tmp];
2191 if (info->id_len) {
2192 if (!memcmp(info->id, id, info->id_len))
2193 return &spi_nor_ids[tmp];
2194 }
2195 }
2196 dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2197 SPI_NOR_MAX_ID_LEN, id);
2198 return ERR_PTR(-ENODEV);
2199}
2200
/*
 * MTD ->_read() callback: read @len bytes starting at @from into @buf,
 * updating @retlen with the number of bytes actually read.  Loops because
 * nor->read() may return a short count.
 */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		/* Xilinx S3AN parts use a translated "default" addressing. */
		if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
			addr = spi_nor_s3an_addr_convert(nor, addr);

		ret = nor->read(nor, addr, len, buf);
		if (ret == 0) {
			/* A zero-byte read would loop forever; treat as I/O error. */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		/* Short read: account for what we got and continue. */
		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
	return ret;
}
2240
/*
 * MTD ->_write() callback for SST flashes with the SST_WRITE flag.
 * These parts program one byte at a time (Byte-Program, SPINOR_OP_BP) or
 * two bytes at a time in Auto Address Increment mode (SPINOR_OP_AAI_WP).
 * The sequence is: an optional leading odd byte, the even-sized middle in
 * AAI word mode, then an optional trailing odd byte.
 */
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		     size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t actual;
	int ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	write_enable(nor);

	nor->sst_write_second = false;

	/* If the destination is odd-aligned, program the first byte alone. */
	actual = to % 2;

	if (actual) {
		nor->program_opcode = SPINOR_OP_BP;

		/* write one byte. */
		ret = nor->write(nor, to, 1, buf);
		if (ret < 0)
			goto sst_write_err;
		WARN(ret != 1, "While writing 1 byte written %i bytes\n",
		     (int)ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto sst_write_err;
	}
	to += actual;

	/* Write out most of the data here, two bytes per AAI cycle. */
	for (; actual < len - 1; actual += 2) {
		nor->program_opcode = SPINOR_OP_AAI_WP;

		/* write two bytes. */
		ret = nor->write(nor, to, 2, buf + actual);
		if (ret < 0)
			goto sst_write_err;
		WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
		     (int)ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto sst_write_err;
		to += 2;
		nor->sst_write_second = true;
	}
	nor->sst_write_second = false;

	/* Leave AAI mode before any single-byte fixup. */
	write_disable(nor);
	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		goto sst_write_err;

	/* Write out the trailing byte if it exists. */
	if (actual != len) {
		write_enable(nor);

		nor->program_opcode = SPINOR_OP_BP;
		ret = nor->write(nor, to, 1, buf + actual);
		if (ret < 0)
			goto sst_write_err;
		WARN(ret != 1, "While writing 1 byte written %i bytes\n",
		     (int)ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto sst_write_err;
		write_disable(nor);
		actual += 1;
	}
sst_write_err:
	*retlen += actual;
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
2319
2320
2321
2322
2323
2324
/*
 * MTD ->_write() callback: program @len bytes at @to from @buf, one flash
 * page at a time.  FLASH pages are auto-incremented within a page but wrap
 * at page boundaries, so each nor->write() call is clamped to the end of
 * the current page.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation.  On the other cases we
		 * need to do a modulus operation (more expensive).
		 * Power of two numbers have only one bit set and we can use
		 * the instruction hweight32 to detect if we need to do a
		 * modulus (do_div()) or not.
		 */
		if (hweight32(nor->page_size) == 1) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			/* do_div() mutates aux and returns the remainder. */
			page_offset = do_div(aux, nor->page_size);
		}

		/* The size of data remaining on the first page. */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		/* Xilinx S3AN parts use a translated "default" addressing. */
		if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
			addr = spi_nor_s3an_addr_convert(nor, addr);

		write_enable(nor);
		ret = nor->write(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
2381
2382static int spi_nor_check(struct spi_nor *nor)
2383{
2384 if (!nor->dev || !nor->read || !nor->write ||
2385 !nor->read_reg || !nor->write_reg) {
2386 pr_err("spi-nor: please fill all the necessary fields!\n");
2387 return -EINVAL;
2388 }
2389
2390 return 0;
2391}
2392
/*
 * Late setup for Xilinx S3AN in-system flash: select the Xilinx-specific
 * opcodes and adjust the geometry according to the addressing mode reported
 * by the XRDSR register.
 */
static int s3an_nor_scan(struct spi_nor *nor)
{
	int ret;
	u8 val;

	ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
	if (ret < 0) {
		dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
		return ret;
	}

	nor->erase_opcode = SPINOR_OP_XSE;
	nor->program_opcode = SPINOR_OP_XPP;
	nor->read_opcode = SPINOR_OP_READ;
	nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;

	/*
	 * The flash_info entries describe the "default" addressing mode with
	 * 264/528-byte pages.  The XRDSR page-size bit tells us whether the
	 * part is instead configured in power-of-two mode, where pages are
	 * 256/512 bytes and the geometry must be recomputed accordingly.
	 *
	 * NOTE(review): switching between the two modes is presumably a
	 * one-way/destructive operation, which is why it is only detected
	 * here and never changed — confirm against the Xilinx S3AN
	 * documentation.
	 */
	if (val & XSR_PAGESIZE) {
		/* Flash in Power of 2 mode: derive sizes from the page size. */
		nor->page_size = (nor->page_size == 264) ? 256 : 512;
		nor->mtd.writebufsize = nor->page_size;
		nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
		nor->mtd.erasesize = 8 * nor->page_size;
	} else {
		/* Flash in Default addressing mode: addresses need translating. */
		nor->flags |= SNOR_F_S3AN_ADDR_DEFAULT;
	}

	return 0;
}
2433
2434static void
2435spi_nor_set_read_settings(struct spi_nor_read_command *read,
2436 u8 num_mode_clocks,
2437 u8 num_wait_states,
2438 u8 opcode,
2439 enum spi_nor_protocol proto)
2440{
2441 read->num_mode_clocks = num_mode_clocks;
2442 read->num_wait_states = num_wait_states;
2443 read->opcode = opcode;
2444 read->proto = proto;
2445}
2446
2447static void
2448spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2449 u8 opcode,
2450 enum spi_nor_protocol proto)
2451{
2452 pp->opcode = opcode;
2453 pp->proto = proto;
2454}
2455
2456static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2457{
2458 size_t i;
2459
2460 for (i = 0; i < size; i++)
2461 if (table[i][0] == (int)hwcaps)
2462 return table[i][1];
2463
2464 return -EINVAL;
2465}
2466
/* Map a SNOR_HWCAPS_READ_* capability bit to its SNOR_CMD_READ_* index. */
static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ, SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2490
/* Map a SNOR_HWCAPS_PP_* capability bit to its SNOR_CMD_PP_* index. */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP, SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
2524{
2525 int ret;
2526
2527 while (len) {
2528 ret = nor->read(nor, addr, len, buf);
2529 if (!ret || ret > len)
2530 return -EIO;
2531 if (ret < 0)
2532 return ret;
2533
2534 buf += ret;
2535 addr += ret;
2536 len -= ret;
2537 }
2538 return 0;
2539}
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
2555 size_t len, void *buf)
2556{
2557 u8 addr_width, read_opcode, read_dummy;
2558 int ret;
2559
2560 read_opcode = nor->read_opcode;
2561 addr_width = nor->addr_width;
2562 read_dummy = nor->read_dummy;
2563
2564 nor->read_opcode = SPINOR_OP_RDSFDP;
2565 nor->addr_width = 3;
2566 nor->read_dummy = 8;
2567
2568 ret = spi_nor_read_raw(nor, addr, len, buf);
2569
2570 nor->read_opcode = read_opcode;
2571 nor->addr_width = addr_width;
2572 nor->read_dummy = read_dummy;
2573
2574 return ret;
2575}
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
2591 size_t len, void *buf)
2592{
2593 void *dma_safe_buf;
2594 int ret;
2595
2596 dma_safe_buf = kmalloc(len, GFP_KERNEL);
2597 if (!dma_safe_buf)
2598 return -ENOMEM;
2599
2600 ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
2601 memcpy(buf, dma_safe_buf, len);
2602 kfree(dma_safe_buf);
2603
2604 return ret;
2605}
2606
2607
2608
2609static void
2610spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
2611 u16 half,
2612 enum spi_nor_protocol proto)
2613{
2614 read->num_mode_clocks = (half >> 5) & 0x07;
2615 read->num_wait_states = (half >> 0) & 0x1f;
2616 read->opcode = (half >> 8) & 0xff;
2617 read->proto = proto;
2618}
2619
/* Describes where a Fast Read command's info lives inside the BFPT. */
struct sfdp_bfpt_read {
	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
	u32 hwcaps;

	/*
	 * The <supported_bit> bit in the <supported_dword> BFPT DWORD tells
	 * us whether the Fast Read x-y-z command is supported.
	 */
	u32 supported_dword;
	u32 supported_bit;

	/*
	 * The half-word at offset <settings_shift> in the <settings_dword>
	 * BFPT DWORD encodes the op code, the number of mode clocks and the
	 * number of wait states to be used by the Fast Read x-y-z command.
	 */
	u32 settings_dword;
	u32 settings_shift;

	/* The SPI protocol for this Fast Read x-y-z command. */
	enum spi_nor_protocol proto;
};

static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
	/* Fast Read 1-1-2 */
	{
		SNOR_HWCAPS_READ_1_1_2,
		BFPT_DWORD(1), BIT(16),	/* Supported bit */
		BFPT_DWORD(4), 0,	/* Settings */
		SNOR_PROTO_1_1_2,
	},

	/* Fast Read 1-2-2 */
	{
		SNOR_HWCAPS_READ_1_2_2,
		BFPT_DWORD(1), BIT(20),	/* Supported bit */
		BFPT_DWORD(4), 16,	/* Settings */
		SNOR_PROTO_1_2_2,
	},

	/* Fast Read 2-2-2 */
	{
		SNOR_HWCAPS_READ_2_2_2,
		BFPT_DWORD(5), BIT(0),	/* Supported bit */
		BFPT_DWORD(6), 16,	/* Settings */
		SNOR_PROTO_2_2_2,
	},

	/* Fast Read 1-1-4 */
	{
		SNOR_HWCAPS_READ_1_1_4,
		BFPT_DWORD(1), BIT(22),	/* Supported bit */
		BFPT_DWORD(3), 16,	/* Settings */
		SNOR_PROTO_1_1_4,
	},

	/* Fast Read 1-4-4 */
	{
		SNOR_HWCAPS_READ_1_4_4,
		BFPT_DWORD(1), BIT(21),	/* Supported bit */
		BFPT_DWORD(3), 0,	/* Settings */
		SNOR_PROTO_1_4_4,
	},

	/* Fast Read 4-4-4 */
	{
		SNOR_HWCAPS_READ_4_4_4,
		BFPT_DWORD(5), BIT(4),	/* Supported bit */
		BFPT_DWORD(7), 16,	/* Settings */
		SNOR_PROTO_4_4_4,
	},
};
2692
/* Describes where one erase type's info lives inside the BFPT. */
struct sfdp_bfpt_erase {
	/*
	 * The half-word at offset <shift> in DWORD <dword> encodes the
	 * erase size and the erase op code for this erase type.
	 */
	u32 dword;
	u32 shift;
};

static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
	/* Erase Type 1 in BFPT DWORD8 bits[15:0] */
	{BFPT_DWORD(8), 0},

	/* Erase Type 2 in BFPT DWORD8 bits[31:16] */
	{BFPT_DWORD(8), 16},

	/* Erase Type 3 in BFPT DWORD9 bits[15:0] */
	{BFPT_DWORD(9), 0},

	/* Erase Type 4 in BFPT DWORD9 bits[31:16] */
	{BFPT_DWORD(9), 16},
};
2715
2716
2717
2718
2719
2720
2721
2722static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
2723 u32 size, u8 opcode)
2724{
2725 erase->size = size;
2726 erase->opcode = opcode;
2727
2728 erase->size_shift = ffs(erase->size) - 1;
2729 erase->size_mask = (1 << erase->size_shift) - 1;
2730}
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745static void
2746spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
2747 u32 size, u8 opcode, u8 i)
2748{
2749 erase->idx = i;
2750 spi_nor_set_erase_type(erase, size, opcode);
2751}
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
2765{
2766 const struct spi_nor_erase_type *left = l, *right = r;
2767
2768 return left->size - right->size;
2769}
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
2783{
2784 struct spi_nor_erase_type *erase_type = map->erase_type;
2785 int i;
2786 u8 sorted_erase_mask = 0;
2787
2788 if (!erase_mask)
2789 return 0;
2790
2791
2792 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
2793 if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
2794 sorted_erase_mask |= BIT(i);
2795
2796 return sorted_erase_mask;
2797}
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
2812{
2813 struct spi_nor_erase_region *region = map->regions;
2814 u8 region_erase_mask, sorted_erase_mask;
2815
2816 while (region) {
2817 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
2818
2819 sorted_erase_mask = spi_nor_sort_erase_mask(map,
2820 region_erase_mask);
2821
2822
2823 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
2824 sorted_erase_mask;
2825
2826 region = spi_nor_region_next(region);
2827 }
2828}
2829
2830
2831
2832
2833
2834
2835
2836
2837static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2838 u8 erase_mask, u64 flash_size)
2839{
2840
2841 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2842 SNOR_LAST_REGION;
2843 map->uniform_region.size = flash_size;
2844 map->regions = &map->uniform_region;
2845 map->uniform_erase_type = erase_mask;
2846}
2847
2848static int
2849spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2850 const struct sfdp_parameter_header *bfpt_header,
2851 const struct sfdp_bfpt *bfpt,
2852 struct spi_nor_flash_parameter *params)
2853{
2854 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2855 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2856 params);
2857
2858 return 0;
2859}
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891static int spi_nor_parse_bfpt(struct spi_nor *nor,
2892 const struct sfdp_parameter_header *bfpt_header,
2893 struct spi_nor_flash_parameter *params)
2894{
2895 struct spi_nor_erase_map *map = &nor->erase_map;
2896 struct spi_nor_erase_type *erase_type = map->erase_type;
2897 struct sfdp_bfpt bfpt;
2898 size_t len;
2899 int i, cmd, err;
2900 u32 addr;
2901 u16 half;
2902 u8 erase_mask;
2903
2904
2905 if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
2906 return -EINVAL;
2907
2908
2909 len = min_t(size_t, sizeof(bfpt),
2910 bfpt_header->length * sizeof(u32));
2911 addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
2912 memset(&bfpt, 0, sizeof(bfpt));
2913 err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
2914 if (err < 0)
2915 return err;
2916
2917
2918 for (i = 0; i < BFPT_DWORD_MAX; i++)
2919 bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
2920
2921
2922 switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
2923 case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
2924 nor->addr_width = 3;
2925 break;
2926
2927 case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
2928 nor->addr_width = 4;
2929 break;
2930
2931 default:
2932 break;
2933 }
2934
2935
2936 params->size = bfpt.dwords[BFPT_DWORD(2)];
2937 if (params->size & BIT(31)) {
2938 params->size &= ~BIT(31);
2939
2940
2941
2942
2943
2944
2945 if (params->size > 63)
2946 return -EINVAL;
2947
2948 params->size = 1ULL << params->size;
2949 } else {
2950 params->size++;
2951 }
2952 params->size >>= 3;
2953
2954
2955 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
2956 const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
2957 struct spi_nor_read_command *read;
2958
2959 if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
2960 params->hwcaps.mask &= ~rd->hwcaps;
2961 continue;
2962 }
2963
2964 params->hwcaps.mask |= rd->hwcaps;
2965 cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
2966 read = ¶ms->reads[cmd];
2967 half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
2968 spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
2969 }
2970
2971
2972
2973
2974
2975 erase_mask = 0;
2976 memset(&nor->erase_map, 0, sizeof(nor->erase_map));
2977 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
2978 const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
2979 u32 erasesize;
2980 u8 opcode;
2981
2982 half = bfpt.dwords[er->dword] >> er->shift;
2983 erasesize = half & 0xff;
2984
2985
2986 if (!erasesize)
2987 continue;
2988
2989 erasesize = 1U << erasesize;
2990 opcode = (half >> 8) & 0xff;
2991 erase_mask |= BIT(i);
2992 spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
2993 opcode, i);
2994 }
2995 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
2996
2997
2998
2999
3000 sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
3001 spi_nor_map_cmp_erase_type, NULL);
3002
3003
3004
3005
3006
3007 spi_nor_regions_sort_erase_types(map);
3008 map->uniform_erase_type = map->uniform_region.offset &
3009 SNOR_ERASE_TYPE_MASK;
3010
3011
3012 if (bfpt_header->length < BFPT_DWORD_MAX)
3013 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
3014 params);
3015
3016
3017 params->page_size = bfpt.dwords[BFPT_DWORD(11)];
3018 params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
3019 params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
3020 params->page_size = 1U << params->page_size;
3021
3022
3023 switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
3024 case BFPT_DWORD15_QER_NONE:
3025 params->quad_enable = NULL;
3026 break;
3027
3028 case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
3029 case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
3030 params->quad_enable = spansion_no_read_cr_quad_enable;
3031 break;
3032
3033 case BFPT_DWORD15_QER_SR1_BIT6:
3034 params->quad_enable = macronix_quad_enable;
3035 break;
3036
3037 case BFPT_DWORD15_QER_SR2_BIT7:
3038 params->quad_enable = sr2_bit7_quad_enable;
3039 break;
3040
3041 case BFPT_DWORD15_QER_SR2_BIT1:
3042 params->quad_enable = spansion_read_cr_quad_enable;
3043 break;
3044
3045 default:
3046 return -EINVAL;
3047 }
3048
3049 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
3050}
3051
/* Sector Map Parameter Table (SMPT) field-extraction macros. */

/* Detection command descriptor: address length selector. */
#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)

/* Detection command descriptor: dummy-cycle count (0xf = variable). */
#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
#define SMPT_CMD_READ_DUMMY_SHIFT 16
#define SMPT_CMD_READ_DUMMY(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL

/* Detection command descriptor: mask applied to the byte read back. */
#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
#define SMPT_CMD_READ_DATA_SHIFT 24
#define SMPT_CMD_READ_DATA(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)

/* Detection command descriptor: instruction op code. */
#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
#define SMPT_CMD_OPCODE_SHIFT 8
#define SMPT_CMD_OPCODE(_cmd) \
	(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)

/* Map descriptor header: number of region dwords that follow (stored -1). */
#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
#define SMPT_MAP_REGION_COUNT_SHIFT 16
#define SMPT_MAP_REGION_COUNT(_header) \
	((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
	  SMPT_MAP_REGION_COUNT_SHIFT) + 1)

/* Map descriptor header: configuration ID of this map. */
#define SMPT_MAP_ID_MASK GENMASK(15, 8)
#define SMPT_MAP_ID_SHIFT 8
#define SMPT_MAP_ID(_header) \
	(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)

/* Region dword: region size in units of 256 bytes (stored -1). */
#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
#define SMPT_MAP_REGION_SIZE_SHIFT 8
#define SMPT_MAP_REGION_SIZE(_region) \
	(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
	   SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)

/* Region dword: bitmask of BFPT Erase Types valid in this region. */
#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
	((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)

/* Descriptor control bits: map (vs command) descriptor, end of table. */
#define SMPT_DESC_TYPE_MAP BIT(1)
#define SMPT_DESC_END BIT(0)
3097
3098
3099
3100
3101
3102
3103
3104static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
3105{
3106 switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
3107 case SMPT_CMD_ADDRESS_LEN_0:
3108 return 0;
3109 case SMPT_CMD_ADDRESS_LEN_3:
3110 return 3;
3111 case SMPT_CMD_ADDRESS_LEN_4:
3112 return 4;
3113 case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
3114
3115 default:
3116 return nor->addr_width;
3117 }
3118}
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
3129{
3130 u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
3131
3132 if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
3133 return nor->read_dummy;
3134 return read_dummy;
3135}
3136
3137
3138
3139
3140
3141
3142
3143
3144
/**
 * spi_nor_get_map_in_use() - find the sector map configuration in use
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the sector map parameter table (CPU endianness)
 * @smpt_len:	sector map parameter table length, in dwords
 *
 * Runs the optional detection commands at the start of the SMPT to build a
 * configuration ID, then scans the map descriptors for the one matching it.
 * The flash's read settings are temporarily borrowed for the detection reads
 * and restored before returning.
 *
 * Return: pointer to the map descriptor in use, ERR_PTR(-errno) otherwise.
 */
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
					 u8 smpt_len)
{
	const u32 *ret;
	u8 *buf;
	u32 addr;
	int err;
	u8 i;
	u8 addr_width, read_opcode, read_dummy;
	u8 read_data_mask, map_id;

	/* kmalloc() guarantees a DMA-safe buffer for the 1-byte raw read. */
	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* Save the current read settings: they are overridden below. */
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;
	read_opcode = nor->read_opcode;

	map_id = 0;
	/* Run the detection command descriptors, two dwords each. */
	for (i = 0; i < smpt_len; i += 2) {
		if (smpt[i] & SMPT_DESC_TYPE_MAP)
			break;

		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
		addr = smpt[i + 1];

		err = spi_nor_read_raw(nor, addr, 1, buf);
		if (err) {
			ret = ERR_PTR(err);
			goto out;
		}

		/*
		 * Shift one result bit per detection command into map_id:
		 * together they select the sector map configuration in use.
		 */
		map_id = map_id << 1 | !!(*buf & read_data_mask);
	}

	/*
	 * Command descriptors always precede map descriptors, so keep
	 * scanning from the current index. Each map descriptor header is
	 * followed by SMPT_MAP_REGION_COUNT() region dwords.
	 */
	ret = ERR_PTR(-EINVAL);
	while (i < smpt_len) {
		if (SMPT_MAP_ID(smpt[i]) == map_id) {
			ret = smpt + i;
			break;
		}

		/*
		 * No more map descriptors and no configuration ID matched:
		 * the sector address map is unknown, ret stays -EINVAL.
		 */
		if (smpt[i] & SMPT_DESC_END)
			break;

		/* Skip this map's header and region dwords. */
		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
	}

	/* fall through */
out:
	kfree(buf);
	/* Restore the read settings saved on entry. */
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;
	nor->read_opcode = read_opcode;
	return ret;
}
3224
3225
3226
3227
3228
3229
3230
3231static void
3232spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
3233 const struct spi_nor_erase_type *erase,
3234 const u8 erase_type)
3235{
3236 int i;
3237
3238 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3239 if (!(erase_type & BIT(i)))
3240 continue;
3241 if (region->size & erase[i].size_mask) {
3242 spi_nor_region_mark_overlay(region);
3243 return;
3244 }
3245 }
3246}
3247
3248
3249
3250
3251
3252
3253
3254
3255static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
3256 const u32 *smpt)
3257{
3258 struct spi_nor_erase_map *map = &nor->erase_map;
3259 struct spi_nor_erase_type *erase = map->erase_type;
3260 struct spi_nor_erase_region *region;
3261 u64 offset;
3262 u32 region_count;
3263 int i, j;
3264 u8 uniform_erase_type, save_uniform_erase_type;
3265 u8 erase_type, regions_erase_type;
3266
3267 region_count = SMPT_MAP_REGION_COUNT(*smpt);
3268
3269
3270
3271
3272 region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
3273 GFP_KERNEL);
3274 if (!region)
3275 return -ENOMEM;
3276 map->regions = region;
3277
3278 uniform_erase_type = 0xff;
3279 regions_erase_type = 0;
3280 offset = 0;
3281
3282 for (i = 0; i < region_count; i++) {
3283 j = i + 1;
3284 region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
3285 erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
3286 region[i].offset = offset | erase_type;
3287
3288 spi_nor_region_check_overlay(®ion[i], erase, erase_type);
3289
3290
3291
3292
3293
3294 uniform_erase_type &= erase_type;
3295
3296
3297
3298
3299
3300 regions_erase_type |= erase_type;
3301
3302 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
3303 region[i].size;
3304 }
3305
3306 save_uniform_erase_type = map->uniform_erase_type;
3307 map->uniform_erase_type = spi_nor_sort_erase_mask(map,
3308 uniform_erase_type);
3309
3310 if (!regions_erase_type) {
3311
3312
3313
3314
3315 map->uniform_erase_type = save_uniform_erase_type;
3316 return -EINVAL;
3317 }
3318
3319
3320
3321
3322
3323
3324 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3325 if (!(regions_erase_type & BIT(erase[i].idx)))
3326 spi_nor_set_erase_type(&erase[i], 0, 0xFF);
3327
3328 spi_nor_region_mark_end(®ion[i - 1]);
3329
3330 return 0;
3331}
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
/**
 * spi_nor_parse_smpt() - parse the Sector Map Parameter Table
 * @nor:		pointer to a 'struct spi_nor'
 * @smpt_header:	sector map parameter table header
 *
 * This optional table describes, for flashes with a non-uniform sector
 * layout, which erase types apply to which address ranges. On success the
 * flash's erase map is replaced by the non-uniform one.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_smpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *smpt_header)
{
	const u32 *sector_map;
	u32 *smpt;
	size_t len;
	u32 addr;
	int i, ret;

	/* Read the Sector Map Parameter Table. */
	len = smpt_header->length * sizeof(*smpt);
	smpt = kmalloc(len, GFP_KERNEL);
	if (!smpt)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(smpt_header);
	ret = spi_nor_read_sfdp(nor, addr, len, smpt);
	if (ret)
		goto out;

	/* Fix endianness of the SMPT DWORDs. */
	for (i = 0; i < smpt_header->length; i++)
		smpt[i] = le32_to_cpu(smpt[i]);

	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
	if (IS_ERR(sector_map)) {
		ret = PTR_ERR(sector_map);
		goto out;
	}

	ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
	if (ret)
		goto out;

	/* Remap region erase-type bits onto the sorted erase_type array. */
	spi_nor_regions_sort_erase_types(&nor->erase_map);
	/* fall through */
out:
	kfree(smpt);
	return ret;
}
3384
3385#define SFDP_4BAIT_DWORD_MAX 2
3386
/*
 * struct sfdp_4bait - ties a SPI NOR hardware capability to the bit of the
 * first 4BAIT dword advertising 4-byte address support for that command.
 */
struct sfdp_4bait {
	/* The hardware capability. */
	u32 hwcaps;

	/*
	 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
	 * the associated 4-byte address op code is supported.
	 */
	u32 supported_bit;
};
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407static int spi_nor_parse_4bait(struct spi_nor *nor,
3408 const struct sfdp_parameter_header *param_header,
3409 struct spi_nor_flash_parameter *params)
3410{
3411 static const struct sfdp_4bait reads[] = {
3412 { SNOR_HWCAPS_READ, BIT(0) },
3413 { SNOR_HWCAPS_READ_FAST, BIT(1) },
3414 { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
3415 { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
3416 { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
3417 { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
3418 { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
3419 { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
3420 { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
3421 };
3422 static const struct sfdp_4bait programs[] = {
3423 { SNOR_HWCAPS_PP, BIT(6) },
3424 { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
3425 { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
3426 };
3427 static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
3428 { 0u , BIT(9) },
3429 { 0u , BIT(10) },
3430 { 0u , BIT(11) },
3431 { 0u , BIT(12) },
3432 };
3433 struct spi_nor_pp_command *params_pp = params->page_programs;
3434 struct spi_nor_erase_map *map = &nor->erase_map;
3435 struct spi_nor_erase_type *erase_type = map->erase_type;
3436 u32 *dwords;
3437 size_t len;
3438 u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
3439 int i, ret;
3440
3441 if (param_header->major != SFDP_JESD216_MAJOR ||
3442 param_header->length < SFDP_4BAIT_DWORD_MAX)
3443 return -EINVAL;
3444
3445
3446 len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
3447
3448
3449 dwords = kmalloc(len, GFP_KERNEL);
3450 if (!dwords)
3451 return -ENOMEM;
3452
3453 addr = SFDP_PARAM_HEADER_PTP(param_header);
3454 ret = spi_nor_read_sfdp(nor, addr, len, dwords);
3455 if (ret)
3456 return ret;
3457
3458
3459 for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
3460 dwords[i] = le32_to_cpu(dwords[i]);
3461
3462
3463
3464
3465
3466 discard_hwcaps = 0;
3467 read_hwcaps = 0;
3468 for (i = 0; i < ARRAY_SIZE(reads); i++) {
3469 const struct sfdp_4bait *read = &reads[i];
3470
3471 discard_hwcaps |= read->hwcaps;
3472 if ((params->hwcaps.mask & read->hwcaps) &&
3473 (dwords[0] & read->supported_bit))
3474 read_hwcaps |= read->hwcaps;
3475 }
3476
3477
3478
3479
3480
3481 pp_hwcaps = 0;
3482 for (i = 0; i < ARRAY_SIZE(programs); i++) {
3483 const struct sfdp_4bait *program = &programs[i];
3484
3485
3486
3487
3488
3489
3490
3491 discard_hwcaps |= program->hwcaps;
3492 if (dwords[0] & program->supported_bit)
3493 pp_hwcaps |= program->hwcaps;
3494 }
3495
3496
3497
3498
3499
3500 erase_mask = 0;
3501 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3502 const struct sfdp_4bait *erase = &erases[i];
3503
3504 if (dwords[0] & erase->supported_bit)
3505 erase_mask |= BIT(i);
3506 }
3507
3508
3509 erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
3510
3511
3512
3513
3514
3515
3516 if (!read_hwcaps || !pp_hwcaps || !erase_mask)
3517 goto out;
3518
3519
3520
3521
3522
3523 params->hwcaps.mask &= ~discard_hwcaps;
3524 params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
3525
3526
3527 for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
3528 struct spi_nor_read_command *read_cmd = ¶ms->reads[i];
3529
3530 read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
3531 }
3532
3533
3534 if (pp_hwcaps & SNOR_HWCAPS_PP)
3535 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP],
3536 SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
3537 if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
3538 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_1_4],
3539 SPINOR_OP_PP_1_1_4_4B,
3540 SNOR_PROTO_1_1_4);
3541 if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
3542 spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_4_4],
3543 SPINOR_OP_PP_1_4_4_4B,
3544 SNOR_PROTO_1_4_4);
3545
3546 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3547 if (erase_mask & BIT(i))
3548 erase_type[i].opcode = (dwords[1] >>
3549 erase_type[i].idx * 8) & 0xFF;
3550 else
3551 spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
3552 }
3553
3554
3555
3556
3557
3558
3559
3560
3561 nor->addr_width = 4;
3562 nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
3563
3564
3565out:
3566 kfree(dwords);
3567 return ret;
3568}
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584static int spi_nor_parse_sfdp(struct spi_nor *nor,
3585 struct spi_nor_flash_parameter *params)
3586{
3587 const struct sfdp_parameter_header *param_header, *bfpt_header;
3588 struct sfdp_parameter_header *param_headers = NULL;
3589 struct sfdp_header header;
3590 struct device *dev = nor->dev;
3591 size_t psize;
3592 int i, err;
3593
3594
3595 err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
3596 if (err < 0)
3597 return err;
3598
3599
3600 if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
3601 header.major != SFDP_JESD216_MAJOR)
3602 return -EINVAL;
3603
3604
3605
3606
3607
3608 bfpt_header = &header.bfpt_header;
3609 if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
3610 bfpt_header->major != SFDP_JESD216_MAJOR)
3611 return -EINVAL;
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624 if (header.nph) {
3625 psize = header.nph * sizeof(*param_headers);
3626
3627 param_headers = kmalloc(psize, GFP_KERNEL);
3628 if (!param_headers)
3629 return -ENOMEM;
3630
3631 err = spi_nor_read_sfdp(nor, sizeof(header),
3632 psize, param_headers);
3633 if (err < 0) {
3634 dev_err(dev, "failed to read SFDP parameter headers\n");
3635 goto exit;
3636 }
3637 }
3638
3639
3640
3641
3642
3643 for (i = 0; i < header.nph; i++) {
3644 param_header = ¶m_headers[i];
3645
3646 if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
3647 param_header->major == SFDP_JESD216_MAJOR &&
3648 (param_header->minor > bfpt_header->minor ||
3649 (param_header->minor == bfpt_header->minor &&
3650 param_header->length > bfpt_header->length)))
3651 bfpt_header = param_header;
3652 }
3653
3654 err = spi_nor_parse_bfpt(nor, bfpt_header, params);
3655 if (err)
3656 goto exit;
3657
3658
3659 for (i = 0; i < header.nph; i++) {
3660 param_header = ¶m_headers[i];
3661
3662 switch (SFDP_PARAM_HEADER_ID(param_header)) {
3663 case SFDP_SECTOR_MAP_ID:
3664 err = spi_nor_parse_smpt(nor, param_header);
3665 break;
3666
3667 case SFDP_4BAIT_ID:
3668 err = spi_nor_parse_4bait(nor, param_header, params);
3669 break;
3670
3671 default:
3672 break;
3673 }
3674
3675 if (err) {
3676 dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
3677 SFDP_PARAM_HEADER_ID(param_header));
3678
3679
3680
3681
3682
3683
3684 err = 0;
3685 }
3686 }
3687
3688exit:
3689 kfree(param_headers);
3690 return err;
3691}
3692
3693static int spi_nor_init_params(struct spi_nor *nor,
3694 struct spi_nor_flash_parameter *params)
3695{
3696 struct spi_nor_erase_map *map = &nor->erase_map;
3697 const struct flash_info *info = nor->info;
3698 u8 i, erase_mask;
3699
3700
3701 memset(params, 0, sizeof(*params));
3702
3703
3704 params->size = (u64)info->sector_size * info->n_sectors;
3705 params->page_size = info->page_size;
3706
3707
3708 params->hwcaps.mask |= SNOR_HWCAPS_READ;
3709 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ],
3710 0, 0, SPINOR_OP_READ,
3711 SNOR_PROTO_1_1_1);
3712
3713 if (!(info->flags & SPI_NOR_NO_FR)) {
3714 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
3715 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_FAST],
3716 0, 8, SPINOR_OP_READ_FAST,
3717 SNOR_PROTO_1_1_1);
3718 }
3719
3720 if (info->flags & SPI_NOR_DUAL_READ) {
3721 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
3722 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_2],
3723 0, 8, SPINOR_OP_READ_1_1_2,
3724 SNOR_PROTO_1_1_2);
3725 }
3726
3727 if (info->flags & SPI_NOR_QUAD_READ) {
3728 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
3729 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_4],
3730 0, 8, SPINOR_OP_READ_1_1_4,
3731 SNOR_PROTO_1_1_4);
3732 }
3733
3734 if (info->flags & SPI_NOR_OCTAL_READ) {
3735 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
3736 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_8],
3737 0, 8, SPINOR_OP_READ_1_1_8,
3738 SNOR_PROTO_1_1_8);
3739 }
3740
3741
3742 params->hwcaps.mask |= SNOR_HWCAPS_PP;
3743 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP],
3744 SPINOR_OP_PP, SNOR_PROTO_1_1_1);
3745
3746
3747
3748
3749
3750 erase_mask = 0;
3751 i = 0;
3752 if (info->flags & SECT_4K_PMC) {
3753 erase_mask |= BIT(i);
3754 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
3755 SPINOR_OP_BE_4K_PMC);
3756 i++;
3757 } else if (info->flags & SECT_4K) {
3758 erase_mask |= BIT(i);
3759 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
3760 SPINOR_OP_BE_4K);
3761 i++;
3762 }
3763 erase_mask |= BIT(i);
3764 spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
3765 SPINOR_OP_SE);
3766 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
3767
3768
3769 if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
3770 SNOR_HWCAPS_PP_QUAD)) {
3771 switch (JEDEC_MFR(info)) {
3772 case SNOR_MFR_MACRONIX:
3773 params->quad_enable = macronix_quad_enable;
3774 break;
3775
3776 case SNOR_MFR_ST:
3777 case SNOR_MFR_MICRON:
3778 break;
3779
3780 default:
3781
3782 params->quad_enable = spansion_quad_enable;
3783 break;
3784 }
3785
3786
3787
3788
3789
3790
3791
3792 if (info->quad_enable)
3793 params->quad_enable = info->quad_enable;
3794 }
3795
3796 if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
3797 !(info->flags & SPI_NOR_SKIP_SFDP)) {
3798 struct spi_nor_flash_parameter sfdp_params;
3799 struct spi_nor_erase_map prev_map;
3800
3801 memcpy(&sfdp_params, params, sizeof(sfdp_params));
3802 memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
3803
3804 if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
3805 nor->addr_width = 0;
3806 nor->flags &= ~SNOR_F_4B_OPCODES;
3807
3808 memcpy(&nor->erase_map, &prev_map,
3809 sizeof(nor->erase_map));
3810 } else {
3811 memcpy(params, &sfdp_params, sizeof(*params));
3812 }
3813 }
3814
3815 return 0;
3816}
3817
3818static int spi_nor_select_read(struct spi_nor *nor,
3819 const struct spi_nor_flash_parameter *params,
3820 u32 shared_hwcaps)
3821{
3822 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
3823 const struct spi_nor_read_command *read;
3824
3825 if (best_match < 0)
3826 return -EINVAL;
3827
3828 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
3829 if (cmd < 0)
3830 return -EINVAL;
3831
3832 read = ¶ms->reads[cmd];
3833 nor->read_opcode = read->opcode;
3834 nor->read_proto = read->proto;
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
3847 return 0;
3848}
3849
3850static int spi_nor_select_pp(struct spi_nor *nor,
3851 const struct spi_nor_flash_parameter *params,
3852 u32 shared_hwcaps)
3853{
3854 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
3855 const struct spi_nor_pp_command *pp;
3856
3857 if (best_match < 0)
3858 return -EINVAL;
3859
3860 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
3861 if (cmd < 0)
3862 return -EINVAL;
3863
3864 pp = ¶ms->page_programs[cmd];
3865 nor->program_opcode = pp->opcode;
3866 nor->write_proto = pp->proto;
3867 return 0;
3868}
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
/**
 * spi_nor_select_uniform_erase() - select the best uniform erase type
 * @map:		the erase map of the SPI NOR
 * @wanted_size:	the erase type size to search for
 *
 * Walks the uniform erase types from the largest down. An exact match for
 * @wanted_size wins; otherwise the largest valid erase type is kept as a
 * fallback. On success, the map's uniform_erase_type mask is narrowed to the
 * single selected type.
 *
 * Return: pointer to the selected erase type, NULL when none is usable.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
			     const u32 wanted_size)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_erase_type;

	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/*
		 * If the current erase size is the one, stop here:
		 * we have found the right uniform Sector Erase command.
		 */
		if (tested_erase->size == wanted_size) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise, the current erase size is still a valid
		 * candidate: keep the biggest one seen so far while we
		 * continue searching for an exact match.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;
			/* keep iterating to find the wanted_size */
	}

	if (!erase)
		return NULL;

	/* Disable all other Sector Erase commands. */
	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
	map->uniform_erase_type |= BIT(erase - map->erase_type);
	return erase;
}
3922
/*
 * Pick the erase settings to be used by the mtd layer. For uniform maps the
 * erase op code and size are chosen from the uniform erase types; for
 * non-uniform maps only mtd->erasesize is set, to the largest erase size, as
 * an upper bound for mtd clients.
 *
 * Returns 0 on success, -EINVAL when no erase type is usable.
 */
static int spi_nor_select_erase(struct spi_nor *nor, u32 wanted_size)
{
	struct spi_nor_erase_map *map = &nor->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	int i;

	/*
	 * The previous implementation handling Sector Erase commands assumed
	 * that the SPI flash memory has an erase size of 4096 bytes when the
	 * SPI_NOR_HAS_4K_SECTORS option is set, so keep that behavior here:
	 * prefer 4K sectors when the kernel is configured for them.
	 */
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	wanted_size = 4096u;
#endif

	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map, wanted_size);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * For non-uniform SPI flash memory, set mtd->erasesize to the
	 * maximum erase sector size. No need to set nor->erase_opcode:
	 * the erase path picks the right op code per region.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}
3969
/*
 * Configure the read, page program and erase settings of @nor from the
 * intersection of what the SPI controller (@hwcaps) and the memory
 * (@params->hwcaps) support, then select the Quad Enable procedure when a
 * Quad protocol was chosen.
 *
 * Returns 0 on success, -errno when no common setting can be found.
 */
static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_flash_parameter *params,
			 const struct spi_nor_hwcaps *hwcaps)
{
	u32 ignored_mask, shared_mask;
	bool enable_quad_io;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	/* SPI n-n-n protocols are not supported yet. */
	ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
			SNOR_HWCAPS_READ_4_4_4 |
			SNOR_HWCAPS_READ_8_8_8 |
			SNOR_HWCAPS_PP_4_4_4 |
			SNOR_HWCAPS_PP_8_8_8);
	if (shared_mask & ignored_mask) {
		dev_dbg(nor->dev,
			"SPI n-n-n protocols are not supported yet.\n");
		shared_mask &= ~ignored_mask;
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, params, shared_mask);
	if (err) {
		dev_err(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, params, shared_mask);
	if (err) {
		dev_err(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor, nor->info->sector_size);
	if (err) {
		dev_err(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Enable Quad I/O if needed. */
	enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
			  spi_nor_get_protocol_width(nor->write_proto) == 4);
	if (enable_quad_io && params->quad_enable)
		nor->quad_enable = params->quad_enable;
	else
		nor->quad_enable = NULL;

	return 0;
}
4030
/*
 * Bring the flash into an operational state: clear the block protection
 * bits when a clear_sr_bp hook is set, run the Quad Enable procedure when
 * one was selected, and enter 4-byte address mode for large flashes that
 * lack dedicated 4-byte op codes.
 *
 * Returns 0 on success, -errno otherwise.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	if (nor->clear_sr_bp) {
		/* Spansion-style QE bit needs the dedicated clear variant. */
		if (nor->quad_enable == spansion_quad_enable)
			nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;

		err = nor->clear_sr_bp(nor);
		if (err) {
			dev_err(nor->dev,
				"fail to clear block protection bits\n");
			return err;
		}
	}

	if (nor->quad_enable) {
		err = nor->quad_enable(nor);
		if (err) {
			dev_err(nor->dev, "quad mode not supported\n");
			return err;
		}
	}

	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		set_4byte(nor, true);
	}

	return 0;
}
4070
4071
4072static void spi_nor_resume(struct mtd_info *mtd)
4073{
4074 struct spi_nor *nor = mtd_to_spi_nor(mtd);
4075 struct device *dev = nor->dev;
4076 int ret;
4077
4078
4079 ret = spi_nor_init(nor);
4080 if (ret)
4081 dev_err(dev, "resume() failed\n");
4082}
4083
/*
 * Restore the flash to a state a bootloader can cope with: leave 4-byte
 * address mode on flashes that entered it via the reset hack (no dedicated
 * 4-byte op codes and a broken RESET# line).
 */
void spi_nor_restore(struct spi_nor *nor)
{
	/* restore the addressing mode */
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET)
		set_4byte(nor, false);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
4092
4093static const struct flash_info *spi_nor_match_id(const char *name)
4094{
4095 const struct flash_info *id = spi_nor_ids;
4096
4097 while (id->name) {
4098 if (!strcmp(name, id->name))
4099 return id;
4100 id++;
4101 }
4102 return NULL;
4103}
4104
4105int spi_nor_scan(struct spi_nor *nor, const char *name,
4106 const struct spi_nor_hwcaps *hwcaps)
4107{
4108 struct spi_nor_flash_parameter params;
4109 const struct flash_info *info = NULL;
4110 struct device *dev = nor->dev;
4111 struct mtd_info *mtd = &nor->mtd;
4112 struct device_node *np = spi_nor_get_flash_node(nor);
4113 int ret;
4114 int i;
4115
4116 ret = spi_nor_check(nor);
4117 if (ret)
4118 return ret;
4119
4120
4121 nor->reg_proto = SNOR_PROTO_1_1_1;
4122 nor->read_proto = SNOR_PROTO_1_1_1;
4123 nor->write_proto = SNOR_PROTO_1_1_1;
4124
4125 if (name)
4126 info = spi_nor_match_id(name);
4127
4128 if (!info)
4129 info = spi_nor_read_id(nor);
4130 if (IS_ERR_OR_NULL(info))
4131 return -ENOENT;
4132
4133
4134
4135
4136
4137 if (name && info->id_len) {
4138 const struct flash_info *jinfo;
4139
4140 jinfo = spi_nor_read_id(nor);
4141 if (IS_ERR(jinfo)) {
4142 return PTR_ERR(jinfo);
4143 } else if (jinfo != info) {
4144
4145
4146
4147
4148
4149
4150
4151 dev_warn(dev, "found %s, expected %s\n",
4152 jinfo->name, info->name);
4153 info = jinfo;
4154 }
4155 }
4156
4157 nor->info = info;
4158
4159 mutex_init(&nor->lock);
4160
4161
4162
4163
4164
4165
4166 if (info->flags & SPI_S3AN)
4167 nor->flags |= SNOR_F_READY_XSR_RDY;
4168
4169
4170
4171
4172
4173 if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
4174 JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
4175 JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
4176 nor->info->flags & SPI_NOR_HAS_LOCK)
4177 nor->clear_sr_bp = spi_nor_clear_sr_bp;
4178
4179
4180 ret = spi_nor_init_params(nor, ¶ms);
4181 if (ret)
4182 return ret;
4183
4184 if (!mtd->name)
4185 mtd->name = dev_name(dev);
4186 mtd->priv = nor;
4187 mtd->type = MTD_NORFLASH;
4188 mtd->writesize = 1;
4189 mtd->flags = MTD_CAP_NORFLASH;
4190 mtd->size = params.size;
4191 mtd->_erase = spi_nor_erase;
4192 mtd->_read = spi_nor_read;
4193 mtd->_resume = spi_nor_resume;
4194
4195
4196 if (JEDEC_MFR(info) == SNOR_MFR_ST ||
4197 JEDEC_MFR(info) == SNOR_MFR_MICRON ||
4198 info->flags & SPI_NOR_HAS_LOCK) {
4199 nor->flash_lock = stm_lock;
4200 nor->flash_unlock = stm_unlock;
4201 nor->flash_is_locked = stm_is_locked;
4202 }
4203
4204 if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
4205 mtd->_lock = spi_nor_lock;
4206 mtd->_unlock = spi_nor_unlock;
4207 mtd->_is_locked = spi_nor_is_locked;
4208 }
4209
4210
4211 if (info->flags & SST_WRITE)
4212 mtd->_write = sst_write;
4213 else
4214 mtd->_write = spi_nor_write;
4215
4216 if (info->flags & USE_FSR)
4217 nor->flags |= SNOR_F_USE_FSR;
4218 if (info->flags & SPI_NOR_HAS_TB)
4219 nor->flags |= SNOR_F_HAS_SR_TB;
4220 if (info->flags & NO_CHIP_ERASE)
4221 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
4222 if (info->flags & USE_CLSR)
4223 nor->flags |= SNOR_F_USE_CLSR;
4224
4225 if (info->flags & SPI_NOR_NO_ERASE)
4226 mtd->flags |= MTD_NO_ERASE;
4227
4228 mtd->dev.parent = dev;
4229 nor->page_size = params.page_size;
4230 mtd->writebufsize = nor->page_size;
4231
4232 if (np) {
4233
4234 if (of_property_read_bool(np, "m25p,fast-read"))
4235 params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
4236 else
4237 params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
4238 } else {
4239
4240 params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
4241 }
4242
4243 if (of_property_read_bool(np, "broken-flash-reset"))
4244 nor->flags |= SNOR_F_BROKEN_RESET;
4245
4246
4247 if (info->flags & SPI_NOR_NO_FR)
4248 params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
4249
4250
4251
4252
4253
4254
4255
4256
4257 ret = spi_nor_setup(nor, ¶ms, hwcaps);
4258 if (ret)
4259 return ret;
4260
4261 if (nor->addr_width) {
4262
4263 } else if (info->addr_width) {
4264 nor->addr_width = info->addr_width;
4265 } else if (mtd->size > 0x1000000) {
4266
4267 nor->addr_width = 4;
4268 } else {
4269 nor->addr_width = 3;
4270 }
4271
4272 if (info->flags & SPI_NOR_4B_OPCODES ||
4273 (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
4274 nor->flags |= SNOR_F_4B_OPCODES;
4275
4276 if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
4277 !(nor->flags & SNOR_F_HAS_4BAIT))
4278 spi_nor_set_4byte_opcodes(nor);
4279
4280 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4281 dev_err(dev, "address width is too large: %u\n",
4282 nor->addr_width);
4283 return -EINVAL;
4284 }
4285
4286 if (info->flags & SPI_S3AN) {
4287 ret = s3an_nor_scan(nor);
4288 if (ret)
4289 return ret;
4290 }
4291
4292
4293 ret = spi_nor_init(nor);
4294 if (ret)
4295 return ret;
4296
4297 dev_info(dev, "%s (%lld Kbytes)\n", info->name,
4298 (long long)mtd->size >> 10);
4299
4300 dev_dbg(dev,
4301 "mtd .name = %s, .size = 0x%llx (%lldMiB), "
4302 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
4303 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
4304 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
4305
4306 if (mtd->numeraseregions)
4307 for (i = 0; i < mtd->numeraseregions; i++)
4308 dev_dbg(dev,
4309 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
4310 ".erasesize = 0x%.8x (%uKiB), "
4311 ".numblocks = %d }\n",
4312 i, (long long)mtd->eraseregions[i].offset,
4313 mtd->eraseregions[i].erasesize,
4314 mtd->eraseregions[i].erasesize / 1024,
4315 mtd->eraseregions[i].numblocks);
4316 return 0;
4317}
4318EXPORT_SYMBOL_GPL(spi_nor_scan);
4319
/* Module metadata exposed via modinfo. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");