1
2
3
4
5
6
7
8
9
10#include <linux/err.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/mutex.h>
15#include <linux/math64.h>
16#include <linux/sizes.h>
17#include <linux/slab.h>
18
19#include <linux/mtd/mtd.h>
20#include <linux/of_platform.h>
21#include <linux/sched/task_stack.h>
22#include <linux/spi/flash.h>
23#include <linux/mtd/spi-nor.h>
24
25#include "core.h"
26
27
28
29
30
31
32
33#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
34
35
36
37
38
39#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
40
41#define SPI_NOR_MAX_ADDR_WIDTH 4
42
43
44
45
46
47
48
49
50
51
52
53static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
54{
55
56 if (object_is_on_stack(op->data.buf.in) ||
57 !virt_addr_valid(op->data.buf.in)) {
58 if (op->data.nbytes > nor->bouncebuf_size)
59 op->data.nbytes = nor->bouncebuf_size;
60 op->data.buf.in = nor->bouncebuf;
61 return true;
62 }
63
64 return false;
65}
66
67
68
69
70
71
72
73
74static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
75{
76 int error;
77
78 error = spi_mem_adjust_op_size(nor->spimem, op);
79 if (error)
80 return error;
81
82 return spi_mem_exec_op(nor->spimem, op);
83}
84
85
86
87
88
89
90
91
92
93
94
/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 1));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	/* Get transfer protocols (addr_nbits, dummy_nbits, data_nbits). */
	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
	op.dummy.buswidth = op.addr.buswidth;
	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	/* Convert the dummy cycles to the number of bytes. */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;

	/* May redirect and clamp op.data to nor->bouncebuf. */
	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		/* Fast path: direct-mapped read. */
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	/* Copy the data back from the bounce buffer, if it was used. */
	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}
133
134
135
136
137
138
139
140
141
142
143ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
144{
145 if (nor->spimem)
146 return spi_nor_spimem_read_data(nor, from, len, buf);
147
148 return nor->controller_ops->read(nor, from, len, buf);
149}
150
151
152
153
154
155
156
157
158
159
160
/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 1));
	ssize_t nbytes;
	int error;

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	/* SST AAI word program: the second and later writes carry no address. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	/*
	 * If the source buffer is not DMA-able, stage (a possibly clamped
	 * amount of) the data in the bounce buffer first.
	 */
	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		/* Fast path: direct-mapped write. */
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}
194
195
196
197
198
199
200
201
202
203
204ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
205 const u8 *buf)
206{
207 if (nor->spimem)
208 return spi_nor_spimem_write_data(nor, to, len, buf);
209
210 return nor->controller_ops->write(nor, to, len, buf);
211}
212
213
214
215
216
217
218
219int spi_nor_write_enable(struct spi_nor *nor)
220{
221 int ret;
222
223 if (nor->spimem) {
224 struct spi_mem_op op =
225 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
226 SPI_MEM_OP_NO_ADDR,
227 SPI_MEM_OP_NO_DUMMY,
228 SPI_MEM_OP_NO_DATA);
229
230 ret = spi_mem_exec_op(nor->spimem, &op);
231 } else {
232 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
233 NULL, 0);
234 }
235
236 if (ret)
237 dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
238
239 return ret;
240}
241
242
243
244
245
246
247
248int spi_nor_write_disable(struct spi_nor *nor)
249{
250 int ret;
251
252 if (nor->spimem) {
253 struct spi_mem_op op =
254 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
255 SPI_MEM_OP_NO_ADDR,
256 SPI_MEM_OP_NO_DUMMY,
257 SPI_MEM_OP_NO_DATA);
258
259 ret = spi_mem_exec_op(nor->spimem, &op);
260 } else {
261 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
262 NULL, 0);
263 }
264
265 if (ret)
266 dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
267
268 return ret;
269}
270
271
272
273
274
275
276
277
278
279static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
280{
281 int ret;
282
283 if (nor->spimem) {
284 struct spi_mem_op op =
285 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
286 SPI_MEM_OP_NO_ADDR,
287 SPI_MEM_OP_NO_DUMMY,
288 SPI_MEM_OP_DATA_IN(1, sr, 1));
289
290 ret = spi_mem_exec_op(nor->spimem, &op);
291 } else {
292 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
293 sr, 1);
294 }
295
296 if (ret)
297 dev_dbg(nor->dev, "error %d reading SR\n", ret);
298
299 return ret;
300}
301
302
303
304
305
306
307
308
309
310static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
311{
312 int ret;
313
314 if (nor->spimem) {
315 struct spi_mem_op op =
316 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
317 SPI_MEM_OP_NO_ADDR,
318 SPI_MEM_OP_NO_DUMMY,
319 SPI_MEM_OP_DATA_IN(1, fsr, 1));
320
321 ret = spi_mem_exec_op(nor->spimem, &op);
322 } else {
323 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
324 fsr, 1);
325 }
326
327 if (ret)
328 dev_dbg(nor->dev, "error %d reading FSR\n", ret);
329
330 return ret;
331}
332
333
334
335
336
337
338
339
340
341
342static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
343{
344 int ret;
345
346 if (nor->spimem) {
347 struct spi_mem_op op =
348 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
349 SPI_MEM_OP_NO_ADDR,
350 SPI_MEM_OP_NO_DUMMY,
351 SPI_MEM_OP_DATA_IN(1, cr, 1));
352
353 ret = spi_mem_exec_op(nor->spimem, &op);
354 } else {
355 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
356 }
357
358 if (ret)
359 dev_dbg(nor->dev, "error %d reading CR\n", ret);
360
361 return ret;
362}
363
364
365
366
367
368
369
370
371
372int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
373{
374 int ret;
375
376 if (nor->spimem) {
377 struct spi_mem_op op =
378 SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
379 SPINOR_OP_EN4B :
380 SPINOR_OP_EX4B,
381 1),
382 SPI_MEM_OP_NO_ADDR,
383 SPI_MEM_OP_NO_DUMMY,
384 SPI_MEM_OP_NO_DATA);
385
386 ret = spi_mem_exec_op(nor->spimem, &op);
387 } else {
388 ret = nor->controller_ops->write_reg(nor,
389 enable ? SPINOR_OP_EN4B :
390 SPINOR_OP_EX4B,
391 NULL, 0);
392 }
393
394 if (ret)
395 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
396
397 return ret;
398}
399
400
401
402
403
404
405
406
407
408
409static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
410{
411 int ret;
412
413 nor->bouncebuf[0] = enable << 7;
414
415 if (nor->spimem) {
416 struct spi_mem_op op =
417 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
418 SPI_MEM_OP_NO_ADDR,
419 SPI_MEM_OP_NO_DUMMY,
420 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
421
422 ret = spi_mem_exec_op(nor->spimem, &op);
423 } else {
424 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
425 nor->bouncebuf, 1);
426 }
427
428 if (ret)
429 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
430
431 return ret;
432}
433
434
435
436
437
438
439
440
441int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
442{
443 int ret;
444
445 nor->bouncebuf[0] = ear;
446
447 if (nor->spimem) {
448 struct spi_mem_op op =
449 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
450 SPI_MEM_OP_NO_ADDR,
451 SPI_MEM_OP_NO_DUMMY,
452 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
453
454 ret = spi_mem_exec_op(nor->spimem, &op);
455 } else {
456 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
457 nor->bouncebuf, 1);
458 }
459
460 if (ret)
461 dev_dbg(nor->dev, "error %d writing EAR\n", ret);
462
463 return ret;
464}
465
466
467
468
469
470
471
472
473
474int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
475{
476 int ret;
477
478 if (nor->spimem) {
479 struct spi_mem_op op =
480 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
481 SPI_MEM_OP_NO_ADDR,
482 SPI_MEM_OP_NO_DUMMY,
483 SPI_MEM_OP_DATA_IN(1, sr, 1));
484
485 ret = spi_mem_exec_op(nor->spimem, &op);
486 } else {
487 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
488 sr, 1);
489 }
490
491 if (ret)
492 dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
493
494 return ret;
495}
496
497
498
499
500
501
502
503
504static int spi_nor_xsr_ready(struct spi_nor *nor)
505{
506 int ret;
507
508 ret = spi_nor_xread_sr(nor, nor->bouncebuf);
509 if (ret)
510 return ret;
511
512 return !!(nor->bouncebuf[0] & XSR_RDY);
513}
514
515
516
517
518
519static void spi_nor_clear_sr(struct spi_nor *nor)
520{
521 int ret;
522
523 if (nor->spimem) {
524 struct spi_mem_op op =
525 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
526 SPI_MEM_OP_NO_ADDR,
527 SPI_MEM_OP_NO_DUMMY,
528 SPI_MEM_OP_NO_DATA);
529
530 ret = spi_mem_exec_op(nor->spimem, &op);
531 } else {
532 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
533 NULL, 0);
534 }
535
536 if (ret)
537 dev_dbg(nor->dev, "error %d clearing SR\n", ret);
538}
539
540
541
542
543
544
545
546
/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_sr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	/* Report and clear program/erase errors when the flash supports CLSR. */
	if (nor->flags & SNOR_F_USE_CLSR &&
	    nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
		if (nor->bouncebuf[0] & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		spi_nor_clear_sr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	/* Ready when the Write-In-Progress bit is clear. */
	return !(nor->bouncebuf[0] & SR_WIP);
}
578
579
580
581
582
583static void spi_nor_clear_fsr(struct spi_nor *nor)
584{
585 int ret;
586
587 if (nor->spimem) {
588 struct spi_mem_op op =
589 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
590 SPI_MEM_OP_NO_ADDR,
591 SPI_MEM_OP_NO_DUMMY,
592 SPI_MEM_OP_NO_DATA);
593
594 ret = spi_mem_exec_op(nor->spimem, &op);
595 } else {
596 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
597 NULL, 0);
598 }
599
600 if (ret)
601 dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
602}
603
604
605
606
607
608
609
610
/**
 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
 * ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_fsr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	/* Report and clear program/erase/protection errors, if any. */
	if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
		if (nor->bouncebuf[0] & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (nor->bouncebuf[0] & FSR_PT_ERR)
			dev_err(nor->dev,
				"Attempted to modify a protected sector.\n");

		spi_nor_clear_fsr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return nor->bouncebuf[0] & FSR_READY;
}
645
646
647
648
649
650
651
652static int spi_nor_ready(struct spi_nor *nor)
653{
654 int sr, fsr;
655
656 if (nor->flags & SNOR_F_READY_XSR_RDY)
657 sr = spi_nor_xsr_ready(nor);
658 else
659 sr = spi_nor_sr_ready(nor);
660 if (sr < 0)
661 return sr;
662 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
663 if (fsr < 0)
664 return fsr;
665 return sr && fsr;
666}
667
668
669
670
671
672
673
674
675
/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		/*
		 * Note the deadline first but still poll once more below, so
		 * the flash gets a final chance to report readiness even if
		 * this task was scheduled away past the deadline.
		 */
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}
701
702
703
704
705
706
707
708
709int spi_nor_wait_till_ready(struct spi_nor *nor)
710{
711 return spi_nor_wait_till_ready_with_timeout(nor,
712 DEFAULT_READY_WAIT_JIFFIES);
713}
714
715
716
717
718
719
720
721
722
723static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
724{
725 int ret;
726
727 ret = spi_nor_write_enable(nor);
728 if (ret)
729 return ret;
730
731 if (nor->spimem) {
732 struct spi_mem_op op =
733 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
734 SPI_MEM_OP_NO_ADDR,
735 SPI_MEM_OP_NO_DUMMY,
736 SPI_MEM_OP_DATA_OUT(len, sr, 1));
737
738 ret = spi_mem_exec_op(nor->spimem, &op);
739 } else {
740 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
741 sr, len);
742 }
743
744 if (ret) {
745 dev_dbg(nor->dev, "error %d writing SR\n", ret);
746 return ret;
747 }
748
749 return spi_nor_wait_till_ready(nor);
750}
751
752
753
754
755
756
757
758
759
760static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
761{
762 int ret;
763
764 nor->bouncebuf[0] = sr1;
765
766 ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
767 if (ret)
768 return ret;
769
770 ret = spi_nor_read_sr(nor, nor->bouncebuf);
771 if (ret)
772 return ret;
773
774 if (nor->bouncebuf[0] != sr1) {
775 dev_dbg(nor->dev, "SR1: read back test failed\n");
776 return -EIO;
777 }
778
779 return 0;
780}
781
782
783
784
785
786
787
788
789
790
791
/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 matches the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Keep the current value of the Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * When the Quad Enable method is set, we know that the
		 * Quad Enable bit is one, only if the Quad Enable method
		 * is kept enabled — so set it here to preserve it across
		 * the 16-bit write.
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		/* No way to know SR2's value; write zero. */
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	/* Can't read CR back; nothing more to verify. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	/* Verify the Configuration Register kept the value we wrote. */
	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
846
847
848
849
850
851
852
853
854
855
856
/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register matches the received value, and that the 16-bit
 * write did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	/* Verify SR1 was not disturbed by the 16-bit write. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	/* Can't read CR back; skip the CR read-back test. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	/* Verify the Configuration Register kept the value we wrote. */
	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
899
900
901
902
903
904
905
906
907
908
909static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
910{
911 if (nor->flags & SNOR_F_HAS_16BIT_SR)
912 return spi_nor_write_16bit_sr_and_check(nor, sr1);
913
914 return spi_nor_write_sr1_and_check(nor, sr1);
915}
916
917
918
919
920
921
922
923
924
925static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
926{
927 int ret;
928
929 ret = spi_nor_write_enable(nor);
930 if (ret)
931 return ret;
932
933 if (nor->spimem) {
934 struct spi_mem_op op =
935 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
936 SPI_MEM_OP_NO_ADDR,
937 SPI_MEM_OP_NO_DUMMY,
938 SPI_MEM_OP_DATA_OUT(1, sr2, 1));
939
940 ret = spi_mem_exec_op(nor->spimem, &op);
941 } else {
942 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
943 sr2, 1);
944 }
945
946 if (ret) {
947 dev_dbg(nor->dev, "error %d writing SR2\n", ret);
948 return ret;
949 }
950
951 return spi_nor_wait_till_ready(nor);
952}
953
954
955
956
957
958
959
960
961
962
963static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
964{
965 int ret;
966
967 if (nor->spimem) {
968 struct spi_mem_op op =
969 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
970 SPI_MEM_OP_NO_ADDR,
971 SPI_MEM_OP_NO_DUMMY,
972 SPI_MEM_OP_DATA_IN(1, sr2, 1));
973
974 ret = spi_mem_exec_op(nor->spimem, &op);
975 } else {
976 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
977 sr2, 1);
978 }
979
980 if (ret)
981 dev_dbg(nor->dev, "error %d reading SR2\n", ret);
982
983 return ret;
984}
985
986
987
988
989
990
991
992static int spi_nor_erase_chip(struct spi_nor *nor)
993{
994 int ret;
995
996 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
997
998 if (nor->spimem) {
999 struct spi_mem_op op =
1000 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
1001 SPI_MEM_OP_NO_ADDR,
1002 SPI_MEM_OP_NO_DUMMY,
1003 SPI_MEM_OP_NO_DATA);
1004
1005 ret = spi_mem_exec_op(nor->spimem, &op);
1006 } else {
1007 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
1008 NULL, 0);
1009 }
1010
1011 if (ret)
1012 dev_dbg(nor->dev, "error %d erasing chip\n", ret);
1013
1014 return ret;
1015}
1016
1017static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
1018{
1019 size_t i;
1020
1021 for (i = 0; i < size; i++)
1022 if (table[i][0] == opcode)
1023 return table[i][1];
1024
1025
1026 return opcode;
1027}
1028
/* Translate a 3-byte-address read opcode to its 4-byte-address variant. */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ, SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },

		/* DTR (Double Transfer Rate) read opcodes */
		{ SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
1049
/* Translate a 3-byte-address program opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP, SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
1063
/* Translate a 3-byte-address erase opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE, SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
1075
1076static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1077{
1078 return !!nor->params->erase_map.uniform_erase_type;
1079}
1080
1081static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1082{
1083 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
1084 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
1085 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
1086
1087 if (!spi_nor_has_uniform_erase(nor)) {
1088 struct spi_nor_erase_map *map = &nor->params->erase_map;
1089 struct spi_nor_erase_type *erase;
1090 int i;
1091
1092 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1093 erase = &map->erase_type[i];
1094 erase->opcode =
1095 spi_nor_convert_3to4_erase(erase->opcode);
1096 }
1097 }
1098}
1099
1100int spi_nor_lock_and_prep(struct spi_nor *nor)
1101{
1102 int ret = 0;
1103
1104 mutex_lock(&nor->lock);
1105
1106 if (nor->controller_ops && nor->controller_ops->prepare) {
1107 ret = nor->controller_ops->prepare(nor);
1108 if (ret) {
1109 mutex_unlock(&nor->lock);
1110 return ret;
1111 }
1112 }
1113 return ret;
1114}
1115
1116void spi_nor_unlock_and_unprep(struct spi_nor *nor)
1117{
1118 if (nor->controller_ops && nor->controller_ops->unprepare)
1119 nor->controller_ops->unprepare(nor);
1120 mutex_unlock(&nor->lock);
1121}
1122
1123static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1124{
1125 if (!nor->params->convert_addr)
1126 return addr;
1127
1128 return nor->params->convert_addr(nor, addr);
1129}
1130
1131
1132
1133
/*
 * Initiate the erasure of a single sector at @addr. Uses the spi-mem path
 * when available, then the controller's dedicated ->erase() hook, and
 * finally falls back to sending the erase opcode with a manually packed
 * address payload.
 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	addr = spi_nor_convert_addr(nor, addr);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return nor->controller_ops->erase(nor, addr);
	}

	/*
	 * Default implementation when the driver has no specialized erase
	 * support: set up the address bytes most-significant byte first.
	 */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return nor->controller_ops->write_reg(nor, nor->erase_opcode,
					      nor->bouncebuf, nor->addr_width);
}
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
1174 u64 dividend, u32 *remainder)
1175{
1176
1177 *remainder = (u32)dividend & erase->size_mask;
1178 return dividend >> erase->size_shift;
1179}
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	/* The erase types supported by this region are flagged in its offset. */
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0; iterate from largest to smallest to prefer big erases.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Don't erase more than what the user asked for. */
		if (erase->size > len)
			continue;

		/* Alignment is not mandatory for overlaid regions. */
		if (region->offset & SNOR_OVERLAID_REGION)
			return erase;

		/* Otherwise @addr must be aligned to the erase size. */
		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (rem)
			continue;
		else
			return erase;
	}

	return NULL;
}
1232
/* Non-zero when @region is the last region of the erase map (flag is encoded
 * in the region offset).
 */
static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
	return region->offset & SNOR_LAST_REGION;
}
1237
/* End boundary of @region: its start offset (flag bits masked off) plus its
 * size.
 */
static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
	return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}
1242
1243
1244
1245
1246
1247
1248
1249struct spi_nor_erase_region *
1250spi_nor_region_next(struct spi_nor_erase_region *region)
1251{
1252 if (spi_nor_region_is_last(region))
1253 return NULL;
1254 region++;
1255 return region;
1256}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267static struct spi_nor_erase_region *
1268spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1269{
1270 struct spi_nor_erase_region *region = map->regions;
1271 u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1272 u64 region_end = region_start + region->size;
1273
1274 while (addr < region_start || addr >= region_end) {
1275 region = spi_nor_region_next(region);
1276 if (!region)
1277 return ERR_PTR(-EINVAL);
1278
1279 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1280 region_end = region_start + region->size;
1281 }
1282
1283 return region;
1284}
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294static struct spi_nor_erase_command *
1295spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1296 const struct spi_nor_erase_type *erase)
1297{
1298 struct spi_nor_erase_command *cmd;
1299
1300 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1301 if (!cmd)
1302 return ERR_PTR(-ENOMEM);
1303
1304 INIT_LIST_HEAD(&cmd->list);
1305 cmd->opcode = erase->opcode;
1306 cmd->count = 1;
1307
1308 if (region->offset & SNOR_OVERLAID_REGION)
1309 cmd->size = region->size;
1310 else
1311 cmd->size = erase->size;
1312
1313 return cmd;
1314}
1315
1316
1317
1318
1319
1320static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1321{
1322 struct spi_nor_erase_command *cmd, *next;
1323
1324 list_for_each_entry_safe(cmd, next, erase_list, list) {
1325 list_del(&cmd->list);
1326 kfree(cmd);
1327 }
1328}
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed. Consecutive erases of the same type are coalesced into a
 * single command with an incremented repeat count.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/*
		 * Start a new command when the erase type changes or the
		 * region is overlaid; otherwise just bump the repeat count
		 * of the current command.
		 */
		if (prev_erase != erase ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Advance to the next region when this one is exhausted. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		/* Each command may be repeated cmd->count times. */
		while (cmd->count) {
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	/* Free the remaining, unexecuted commands. */
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
1447
1448
1449
1450
1451
/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Uniform erase requires the length to be a multiple of erasesize. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto erase_err;

		ret = spi_nor_erase_chip(nor);
		if (ret)
			goto erase_err;

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor);

	return ret;
}
1538
1539static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
1540{
1541 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1542
1543 if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
1544 return mask | SR_BP3_BIT6;
1545
1546 if (nor->flags & SNOR_F_HAS_4BIT_BP)
1547 return mask | SR_BP3;
1548
1549 return mask;
1550}
1551
1552static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
1553{
1554 if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
1555 return SR_TB_BIT6;
1556 else
1557 return SR_TB_BIT5;
1558}
1559
1560static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
1561{
1562 unsigned int bp_slots, bp_slots_needed;
1563 u8 mask = spi_nor_get_sr_bp_mask(nor);
1564
1565
1566 bp_slots = (1 << hweight8(mask)) - 2;
1567 bp_slots_needed = ilog2(nor->info->n_sectors);
1568
1569 if (bp_slots_needed > bp_slots)
1570 return nor->info->sector_size <<
1571 (bp_slots_needed - bp_slots);
1572 else
1573 return nor->info->sector_size;
1574}
1575
/*
 * Decode the range currently locked by the Block Protect (and optional
 * Top/Bottom) bits of the Status Register value @sr, writing the result to
 * *@ofs and *@len.
 */
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
					uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 bp, val = sr & mask;

	/* Relocate a bit-6 BP3 to the standard BP3 position for decoding. */
	if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
		val = (val & ~SR_BP3_BIT6) | SR_BP3;

	bp = val >> SR_BP_SHIFT;

	if (!bp) {
		/* No protection */
		*ofs = 0;
		*len = 0;
		return;
	}

	/* Each BP increment doubles the protected length. */
	min_prot_len = spi_nor_get_min_prot_length_sr(nor);
	*len = min_prot_len << (bp - 1);

	if (*len > mtd->size)
		*len = mtd->size;

	/* TB bit selects bottom protection; default is top. */
	if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
		*ofs = 0;
	else
		*ofs = mtd->size - *len;
}
1608
1609
1610
1611
1612
1613static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
1614 uint64_t len, u8 sr, bool locked)
1615{
1616 loff_t lock_offs;
1617 uint64_t lock_len;
1618
1619 if (!len)
1620 return 1;
1621
1622 spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
1623
1624 if (locked)
1625
1626 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1627 else
1628
1629 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1630}
1631
/* Return 1 if the entire region [ofs, ofs+len) is locked per @sr. */
static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				u8 sr)
{
	return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
}
1637
/* Return 1 if the entire region [ofs, ofs+len) is unlocked per @sr. */
static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				  u8 sr)
{
	return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
}
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
/*
 * spi_nor_sr_lock() - lock a region of the flash via the Status Register
 * BP bits.
 * @nor:	pointer to a 'struct spi_nor'.
 * @ofs:	offset of the first byte to lock.
 * @len:	number of bytes to lock.
 *
 * The BP bits can only protect one power-of-two sized region anchored at
 * the Top of the flash or, when SNOR_F_HAS_SR_TB, at the Bottom.  The
 * requested range is therefore widened to the smallest such region that
 * contains it, and the operation fails rather than unprotect anything that
 * is currently protected.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	int ret, status_old, status_new;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	status_old = nor->bouncebuf[0];

	/* If nothing in our range is unlocked, we don't need to do anything. */
	if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection. */
	if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection. */
	if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
				  status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid. */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked. */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	if (lock_len == mtd->size) {
		/* All BP bits set protects the whole device. */
		val = mask;
	} else {
		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
		val = pow << SR_BP_SHIFT;

		/* Move BP3 into bit 6 for flashes with that layout. */
		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
			val = (val & ~SR_BP3) | SR_BP3_BIT6;

		if (val & ~mask)
			return -EINVAL;

		/* Don't "lock" with no region! */
		if (!(val & mask))
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Disallow further writes if WP# pin is asserted. */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same. */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas. */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}
1756
1757
1758
1759
1760
1761
/*
 * spi_nor_sr_unlock() - unlock a region of the flash via the Status
 * Register BP bits.
 * @nor:	pointer to a 'struct spi_nor'.
 * @ofs:	offset of the first byte to unlock.
 * @len:	number of bytes to unlock.
 *
 * Mirror image of spi_nor_sr_lock(): shrink the single BP-protected region
 * so it no longer covers the requested range, failing rather than lock
 * anything that is currently unlocked.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	int ret, status_old, status_new;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	status_old = nor->bouncebuf[0];

	/* If nothing in our range is locked, we don't need to do anything. */
	if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection. */
	if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection. */
	if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				    status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid. */
	use_top = can_be_top;

	/* lock_len: length of region that should remain locked. */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	if (lock_len == 0) {
		val = 0; /* fully unlocked */
	} else {
		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
		val = pow << SR_BP_SHIFT;

		/* Move BP3 into bit 6 for flashes with that layout. */
		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
			val = (val & ~SR_BP3) | SR_BP3_BIT6;

		/* Some power-of-two sizes are not supported. */
		if (val & ~mask)
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Don't protect status register if we're fully unlocked. */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same. */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas. */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}
1839
1840
1841
1842
1843
1844
1845
1846
1847static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1848{
1849 int ret;
1850
1851 ret = spi_nor_read_sr(nor, nor->bouncebuf);
1852 if (ret)
1853 return ret;
1854
1855 return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
1856}
1857
/* Default locking operations, implemented via the Status Register BP bits. */
static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
	.lock = spi_nor_sr_lock,
	.unlock = spi_nor_sr_unlock,
	.is_locked = spi_nor_sr_is_locked,
};
1863
1864static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1865{
1866 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1867 int ret;
1868
1869 ret = spi_nor_lock_and_prep(nor);
1870 if (ret)
1871 return ret;
1872
1873 ret = nor->params->locking_ops->lock(nor, ofs, len);
1874
1875 spi_nor_unlock_and_unprep(nor);
1876 return ret;
1877}
1878
1879static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1880{
1881 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1882 int ret;
1883
1884 ret = spi_nor_lock_and_prep(nor);
1885 if (ret)
1886 return ret;
1887
1888 ret = nor->params->locking_ops->unlock(nor, ofs, len);
1889
1890 spi_nor_unlock_and_unprep(nor);
1891 return ret;
1892}
1893
1894static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1895{
1896 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1897 int ret;
1898
1899 ret = spi_nor_lock_and_prep(nor);
1900 if (ret)
1901 return ret;
1902
1903 ret = nor->params->locking_ops->is_locked(nor, ofs, len);
1904
1905 spi_nor_unlock_and_unprep(nor);
1906 return ret;
1907}
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1919{
1920 int ret;
1921
1922 ret = spi_nor_read_sr(nor, nor->bouncebuf);
1923 if (ret)
1924 return ret;
1925
1926 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1927 return 0;
1928
1929 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1930
1931 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1932}
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
1944{
1945 int ret;
1946
1947 if (nor->flags & SNOR_F_NO_READ_CR)
1948 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
1949
1950 ret = spi_nor_read_cr(nor, nor->bouncebuf);
1951 if (ret)
1952 return ret;
1953
1954 if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
1955 return 0;
1956
1957 nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
1958
1959 return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
1960}
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
/*
 * spi_nor_sr2_bit7_quad_enable() - set the Quad Enable bit, bit 7 of the
 * Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Sets QE via the dedicated SR2 read/write commands and verifies the write
 * by reading SR2 back and comparing it with the written value.
 *
 * Return: 0 on success, -errno otherwise (-EIO if the read-back mismatches).
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check the current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}
2008
/*
 * Per-manufacturer flash part databases.  spi_nor_read_id() and
 * spi_nor_match_id() scan these in array order.
 */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_catalyst,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_fujitsu,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
};
2028
2029static const struct flash_info *
2030spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
2031 const u8 *id)
2032{
2033 unsigned int i;
2034
2035 for (i = 0; i < nparts; i++) {
2036 if (parts[i].id_len &&
2037 !memcmp(parts[i].id, id, parts[i].id_len))
2038 return &parts[i];
2039 }
2040
2041 return NULL;
2042}
2043
/*
 * spi_nor_read_id() - read the JEDEC ID and look up the matching part.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Issues the RDID command (via spi-mem or the legacy controller_ops) and
 * scans every manufacturer's part table for a match.  On success the
 * matched manufacturer is cached in nor->manufacturer.
 *
 * Return: the matching flash_info, or ERR_PTR(-errno) on failure /
 * unknown ID.
 */
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	unsigned int i;
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* Try every manufacturer's part table in turn. */
	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		info = spi_nor_search_part_by_id(manufacturers[i]->parts,
						 manufacturers[i]->nparts,
						 id);
		if (info) {
			nor->manufacturer = manufacturers[i];
			return info;
		}
	}

	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
		SPI_NOR_MAX_ID_LEN, id);
	return ERR_PTR(-ENODEV);
}
2082
/*
 * spi_nor_read() - mtd->_read hook.
 * @mtd:	mtd device.
 * @from:	offset to read from.
 * @len:	number of bytes to read.
 * @retlen:	output, number of bytes actually read.
 * @buf:	destination buffer.
 *
 * Loops because spi_nor_read_data() may return a short count; a zero-byte
 * transfer is treated as -EIO so the loop cannot spin forever.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		/* Translate for flashes with non-linear addressing. */
		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads. */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
2121
2122
2123
2124
2125
2126
/*
 * spi_nor_write() - mtd->_write hook.
 * @mtd:	mtd device.
 * @to:		offset to write to.
 * @len:	number of bytes to write.
 * @retlen:	output, number of bytes actually written.
 * @buf:	source buffer.
 *
 * Writes are split so that no single program operation crosses a page
 * boundary.  Note the write_err label is also reached on success, with
 * ret == 0 from the final spi_nor_wait_till_ready().
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be computed
		 * with a cheap mask; otherwise fall back to a 64-bit modulus
		 * via do_div().
		 */
		if (hweight32(nor->page_size) == 1) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			/* do_div() returns the remainder. */
			page_offset = do_div(aux, nor->page_size);
		}
		/* Bytes left in the current page, capped by what remains. */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		/* Translate for flashes with non-linear addressing. */
		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
2185
2186static int spi_nor_check(struct spi_nor *nor)
2187{
2188 if (!nor->dev ||
2189 (!nor->spimem && !nor->controller_ops) ||
2190 (!nor->spimem && nor->controller_ops &&
2191 (!nor->controller_ops->read ||
2192 !nor->controller_ops->write ||
2193 !nor->controller_ops->read_reg ||
2194 !nor->controller_ops->write_reg))) {
2195 pr_err("spi-nor: please fill all the necessary fields!\n");
2196 return -EINVAL;
2197 }
2198
2199 if (nor->spimem && nor->controller_ops) {
2200 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2201 return -EINVAL;
2202 }
2203
2204 return 0;
2205}
2206
2207static void
2208spi_nor_set_read_settings(struct spi_nor_read_command *read,
2209 u8 num_mode_clocks,
2210 u8 num_wait_states,
2211 u8 opcode,
2212 enum spi_nor_protocol proto)
2213{
2214 read->num_mode_clocks = num_mode_clocks;
2215 read->num_wait_states = num_wait_states;
2216 read->opcode = opcode;
2217 read->proto = proto;
2218}
2219
2220void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
2221 enum spi_nor_protocol proto)
2222{
2223 pp->opcode = opcode;
2224 pp->proto = proto;
2225}
2226
2227static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2228{
2229 size_t i;
2230
2231 for (i = 0; i < size; i++)
2232 if (table[i][0] == (int)hwcaps)
2233 return table[i][1];
2234
2235 return -EINVAL;
2236}
2237
/*
 * spi_nor_hwcaps_read2cmd() - map a single SNOR_HWCAPS_READ* capability bit
 * to its SNOR_CMD_READ* command index, or -EINVAL if unknown.
 */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2261
/*
 * spi_nor_hwcaps_pp2cmd() - map a single SNOR_HWCAPS_PP* capability bit to
 * its SNOR_CMD_PP* command index, or -EINVAL if unknown.
 */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2277
2278
2279
2280
2281
2282
2283
2284
2285
/*
 * spi_nor_spimem_check_op() - check whether the controller supports @op.
 * @nor:	pointer to a 'struct spi_nor'.
 * @op:		spi-mem operation to probe; op->addr.nbytes is overwritten.
 *
 * Probes the 4-byte address form first; if the controller rejects it, a
 * 3-byte fallback is only acceptable for flashes that fit in 16 MiB of
 * address space.
 *
 * Return: 0 if the operation is supported, -ENOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		/* >16 MiB parts need 4-byte addressing; no fallback. */
		if (nor->mtd.size > SZ_16M)
			return -ENOTSUPP;

		/* Retry with the 3-byte address form. */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -ENOTSUPP;
	}

	return 0;
}
2308
2309
2310
2311
2312
2313
2314
2315
2316
/*
 * spi_nor_spimem_check_readop() - check controller support for a read op.
 * @nor:	pointer to a 'struct spi_nor'.
 * @read:	read command settings to probe.
 *
 * Builds a template spi-mem op from the read settings (converting mode +
 * wait-state clock cycles into dummy bytes) and asks the controller.
 *
 * Return: 0 if supported, -ENOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
					  SPI_MEM_OP_ADDR(3, 0, 1),
					  SPI_MEM_OP_DUMMY(0, 1),
					  SPI_MEM_OP_DATA_IN(0, NULL, 1));

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
	op.dummy.buswidth = op.addr.buswidth;
	/* Convert dummy clock cycles into bytes on the dummy bus width. */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;

	return spi_nor_spimem_check_op(nor, &op);
}
2334
2335
2336
2337
2338
2339
2340
2341
2342
/*
 * spi_nor_spimem_check_pp() - check controller support for a page program op.
 * @nor:	pointer to a 'struct spi_nor'.
 * @pp:		page program command settings to probe.
 *
 * Return: 0 if supported, -ENOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
					  SPI_MEM_OP_ADDR(3, 0, 1),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(0, NULL, 1));

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}
2357
2358
2359
2360
2361
2362
2363
2364
/*
 * spi_nor_spimem_adjust_hwcaps() - drop hardware capability bits the
 * spi-mem controller cannot actually execute.
 * @nor:	pointer to a 'struct spi_nor'.
 * @hwcaps:	in/out capability mask; unsupported bits are cleared.
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	unsigned int cap;

	/* DTR modes are masked out unconditionally here. */
	*hwcaps &= ~SNOR_HWCAPS_DTR;

	/* X-X-X (command on multiple lines) modes likewise. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/* Probe every remaining capability bit against the controller. */
	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}
2397
2398
2399
2400
2401
2402
2403
2404void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2405 u8 opcode)
2406{
2407 erase->size = size;
2408 erase->opcode = opcode;
2409
2410 erase->size_shift = ffs(erase->size) - 1;
2411 erase->size_mask = (1 << erase->size_shift) - 1;
2412}
2413
2414
2415
2416
2417
2418
2419
2420
2421void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2422 u8 erase_mask, u64 flash_size)
2423{
2424
2425 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2426 SNOR_LAST_REGION;
2427 map->uniform_region.size = flash_size;
2428 map->regions = &map->uniform_region;
2429 map->uniform_erase_type = erase_mask;
2430}
2431
2432int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2433 const struct sfdp_parameter_header *bfpt_header,
2434 const struct sfdp_bfpt *bfpt,
2435 struct spi_nor_flash_parameter *params)
2436{
2437 int ret;
2438
2439 if (nor->manufacturer && nor->manufacturer->fixups &&
2440 nor->manufacturer->fixups->post_bfpt) {
2441 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2442 bfpt, params);
2443 if (ret)
2444 return ret;
2445 }
2446
2447 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2448 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2449 params);
2450
2451 return 0;
2452}
2453
/*
 * spi_nor_select_read() - pick the best read command supported by both the
 * controller and the flash.
 * @nor:		pointer to a 'struct spi_nor'.
 * @shared_hwcaps:	intersection of controller and flash capabilities.
 *
 * The highest set capability bit is considered the best match.
 *
 * Return: 0 on success, -EINVAL when no read capability is shared.
 */
static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * read_dummy is stored as a total number of dummy clock cycles
	 * (mode clocks + wait states); the conversion to dummy bytes is
	 * done when the op is actually built.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}
2484
2485static int spi_nor_select_pp(struct spi_nor *nor,
2486 u32 shared_hwcaps)
2487{
2488 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2489 const struct spi_nor_pp_command *pp;
2490
2491 if (best_match < 0)
2492 return -EINVAL;
2493
2494 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2495 if (cmd < 0)
2496 return -EINVAL;
2497
2498 pp = &nor->params->page_programs[cmd];
2499 nor->program_opcode = pp->opcode;
2500 nor->write_proto = pp->proto;
2501 return 0;
2502}
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
/*
 * spi_nor_select_uniform_erase() - select the erase type to use for a
 * uniform erase map.
 * @map:		the erase map of the SPI NOR.
 * @wanted_size:	the preferred erase size.
 *
 * Walks the erase types from the highest index down.  A type whose size
 * equals @wanted_size wins outright; otherwise the first valid (non-zero
 * sized) type encountered is kept as fallback.  On success the map's
 * uniform_erase_type mask is narrowed to the selected type only.
 *
 * Return: the selected erase type, or NULL if none is valid.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
			     const u32 wanted_size)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_erase_type;

	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/* An exact size match always wins. */
		if (tested_erase->size == wanted_size) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise remember the first valid type seen, so there is
		 * still a fallback if no exact match exists.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;

	}

	if (!erase)
		return NULL;

	/* Narrow the map down to the selected erase type only. */
	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
	map->uniform_erase_type |= BIT(erase - map->erase_type);
	return erase;
}
2556
/*
 * spi_nor_select_erase() - choose the erase settings and fill in
 * mtd->erasesize (and, for uniform maps, nor->erase_opcode).
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * For uniform maps, prefers the flash's sector size — or 4 KiB when
 * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is enabled.  For non-uniform maps,
 * mtd->erasesize is set to the largest valid erase type so it is a
 * multiple of every region's erase size.
 *
 * Return: 0 on success, -EINVAL when no erase type is valid.
 */
static int spi_nor_select_erase(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	u32 wanted_size = nor->info->sector_size;
	int i;

#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* Prefer 4 KiB erase sectors when the kernel is configured for it. */
	wanted_size = 4096u;
#endif

	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map, wanted_size);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * Non-uniform map: pick the biggest valid erase type for
	 * mtd->erasesize.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}
2604
/*
 * spi_nor_default_setup() - default params->setup implementation.
 * @nor:	pointer to a 'struct spi_nor'.
 * @hwcaps:	capabilities advertised by the SPI controller/driver.
 *
 * Intersects controller and flash capabilities, drops modes the transport
 * cannot execute, then selects the read, page program and erase settings.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_default_setup(struct spi_nor *nor,
				 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/* Keep only the hardware capabilities supported by both sides. */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/* Let the spi-mem controller veto individual ops. */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * Legacy controller_ops path: SPI n-n-n protocols are not
		 * supported, mask them out.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return 0;
}
2665
2666static int spi_nor_setup(struct spi_nor *nor,
2667 const struct spi_nor_hwcaps *hwcaps)
2668{
2669 if (!nor->params->setup)
2670 return 0;
2671
2672 return nor->params->setup(nor, hwcaps);
2673}
2674
2675
2676
2677
2678
2679
2680static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2681{
2682 if (nor->manufacturer && nor->manufacturer->fixups &&
2683 nor->manufacturer->fixups->default_init)
2684 nor->manufacturer->fixups->default_init(nor);
2685
2686 if (nor->info->fixups && nor->info->fixups->default_init)
2687 nor->info->fixups->default_init(nor);
2688}
2689
2690
2691
2692
2693
2694
2695
2696
2697
/*
 * spi_nor_sfdp_init_params() - overwrite the flash parameters with values
 * parsed from the SFDP tables.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The parse works on a stack copy so that a failed parse leaves the
 * existing parameters untouched; only addr_width and the 4B-opcodes flag
 * (which the parser may have set) are rolled back on failure.
 */
static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
		/* Parse failed: undo parser side effects, keep old params. */
		nor->addr_width = 0;
		nor->flags &= ~SNOR_F_4B_OPCODES;
	} else {
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
	}
}
2711
2712
2713
2714
2715
2716
/*
 * spi_nor_info_init_params() - initialize the flash parameters from the
 * static flash_info entry and the device tree node.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Sets legacy defaults (quad enable, 4-byte address mode handler, setup
 * hook), sizes, the supported read/page-program commands and a uniform
 * erase map derived from the info flags.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Initialize legacy flash parameters and settings. */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
	params->setup = spi_nor_default_setup;
	/* Default to 16-bit Write Status (01h) Command. */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	/*
	 * Sector Erase settings.  Build a uniform erase map: an optional
	 * 4 KiB erase type plus the mandatory sector erase.
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
2816{
2817 if (nor->manufacturer && nor->manufacturer->fixups &&
2818 nor->manufacturer->fixups->post_sfdp)
2819 nor->manufacturer->fixups->post_sfdp(nor);
2820
2821 if (nor->info->fixups && nor->info->fixups->post_sfdp)
2822 nor->info->fixups->post_sfdp(nor);
2823}
2824
2825
2826
2827
2828
2829
2830
2831
2832static void spi_nor_late_init_params(struct spi_nor *nor)
2833{
2834
2835
2836
2837
2838 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
2839 nor->params->locking_ops = &spi_nor_sr_locking_ops;
2840}
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879static int spi_nor_init_params(struct spi_nor *nor)
2880{
2881 nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
2882 if (!nor->params)
2883 return -ENOMEM;
2884
2885 spi_nor_info_init_params(nor);
2886
2887 spi_nor_manufacturer_init_params(nor);
2888
2889 if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
2890 !(nor->info->flags & SPI_NOR_SKIP_SFDP))
2891 spi_nor_sfdp_init_params(nor);
2892
2893 spi_nor_post_sfdp_fixups(nor);
2894
2895 spi_nor_late_init_params(nor);
2896
2897 return 0;
2898}
2899
2900
2901
2902
2903
2904
2905
2906static int spi_nor_quad_enable(struct spi_nor *nor)
2907{
2908 if (!nor->params->quad_enable)
2909 return 0;
2910
2911 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
2912 spi_nor_get_protocol_width(nor->write_proto) == 4))
2913 return 0;
2914
2915 return nor->params->quad_enable(nor);
2916}
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927static int spi_nor_unlock_all(struct spi_nor *nor)
2928{
2929 if (nor->flags & SNOR_F_HAS_LOCK)
2930 return spi_nor_unlock(&nor->mtd, 0, nor->params->size);
2931
2932 return 0;
2933}
2934
/*
 * spi_nor_init() - runtime initialization of the flash: enable quad mode
 * if needed, unlock the whole array, and enter 4-byte address mode on
 * large parts without dedicated 4-byte opcodes.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	err = spi_nor_unlock_all(nor);
	if (err) {
		dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
		return err;
	}

	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * No native 4-byte opcodes: switch the flash into 4-byte
		 * address mode.  If the flash cannot be reset properly
		 * (SNOR_F_BROKEN_RESET), it may stay in that mode across an
		 * unexpected reboot and confuse bootloaders — warn about it.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		nor->params->set_4byte_addr_mode(nor, true);
	}

	return 0;
}
2966
2967
2968static void spi_nor_resume(struct mtd_info *mtd)
2969{
2970 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2971 struct device *dev = nor->dev;
2972 int ret;
2973
2974
2975 ret = spi_nor_init(nor);
2976 if (ret)
2977 dev_err(dev, "resume() failed\n");
2978}
2979
2980void spi_nor_restore(struct spi_nor *nor)
2981{
2982
2983 if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
2984 nor->flags & SNOR_F_BROKEN_RESET)
2985 nor->params->set_4byte_addr_mode(nor, false);
2986}
2987EXPORT_SYMBOL_GPL(spi_nor_restore);
2988
2989static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
2990 const char *name)
2991{
2992 unsigned int i, j;
2993
2994 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
2995 for (j = 0; j < manufacturers[i]->nparts; j++) {
2996 if (!strcmp(name, manufacturers[i]->parts[j].name)) {
2997 nor->manufacturer = manufacturers[i];
2998 return &manufacturers[i]->parts[j];
2999 }
3000 }
3001 }
3002
3003 return NULL;
3004}
3005
3006static int spi_nor_set_addr_width(struct spi_nor *nor)
3007{
3008 if (nor->addr_width) {
3009
3010 } else if (nor->info->addr_width) {
3011 nor->addr_width = nor->info->addr_width;
3012 } else if (nor->mtd.size > 0x1000000) {
3013
3014 nor->addr_width = 4;
3015 } else {
3016 nor->addr_width = 3;
3017 }
3018
3019 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
3020 dev_dbg(nor->dev, "address width is too large: %u\n",
3021 nor->addr_width);
3022 return -EINVAL;
3023 }
3024
3025
3026 if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
3027 !(nor->flags & SNOR_F_HAS_4BAIT))
3028 spi_nor_set_4byte_opcodes(nor);
3029
3030 return 0;
3031}
3032
3033static void spi_nor_debugfs_init(struct spi_nor *nor,
3034 const struct flash_info *info)
3035{
3036 struct mtd_info *mtd = &nor->mtd;
3037
3038 mtd->dbg.partname = info->name;
3039 mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
3040 info->id_len, info->id);
3041}
3042
/*
 * spi_nor_get_flash_info() - resolve the flash_info entry for this flash.
 * @nor:	pointer to a 'struct spi_nor'.
 * @name:	flash name passed by the caller, may be NULL.
 *
 * Tries the caller-supplied name first, falls back to JEDEC ID detection.
 * When both are available and disagree, the JEDEC ID wins and a warning
 * is printed.
 *
 * Return: the flash_info to use, or an ERR_PTR on failure.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(nor, name);

	/* Try JEDEC ID detection if the name lookup failed or was skipped. */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If caller has specified a name and the entry carries a JEDEC ID,
	 * cross-check the named entry against what the chip reports.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * The chip's ID disagrees with the requested name:
			 * trust the chip, but warn so board files / device
			 * trees can be fixed up.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}
3082
/**
 * spi_nor_scan() - identify a SPI NOR flash and set up its MTD interface
 * @nor:	the spi_nor structure; the caller must have set nor->dev and
 *		the transport (spimem or controller ops) beforehand
 * @name:	flash name to match against, or NULL for pure JEDEC ID
 *		detection
 * @hwcaps:	hardware capabilities the controller supports
 *
 * Detects the flash, allocates the bounce buffer, initializes the flash
 * parameters/settings and fills in the embedded mtd_info so the caller
 * can register it with the MTD core.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Start in the safest mode: single-line SPI for all operations. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * Bounce buffer for transfers whose caller buffer is on the stack
	 * or otherwise not DMA-able (see spi_nor_spimem_bounce()).  Sized
	 * to PAGE_SIZE here; spi_nor_probe() enlarges it later when the
	 * flash page size turns out to be bigger.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Flash-specific status quirks, needed before the first status
	 * polling can happen.
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	/* NOTE(review): set before spi_nor_init_params(); presumably so
	 * params init can override it (e.g. for SST flashes) — confirm. */
	mtd->_write = spi_nor_write;

	/* Discover sizes, opcodes and capabilities (SFDP and/or tables). */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = nor->params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_resume = spi_nor_resume;

	/* Hook up MTD locking only when the flash provides locking ops. */
	if (nor->params->locking_ops) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* Translate the remaining per-flash table flags to runtime flags. */
	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (info->flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (info->flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (info->flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = nor->params->page_size;
	mtd->writebufsize = nor->page_size;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Select read/write/erase settings from what the flash offers
	 * intersected with the controller capabilities in @hwcaps.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	/* Send the configuration to the chip (quad enable, 4-byte mode...). */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
			(long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
3234
3235static int spi_nor_create_read_dirmap(struct spi_nor *nor)
3236{
3237 struct spi_mem_dirmap_info info = {
3238 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
3239 SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
3240 SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
3241 SPI_MEM_OP_DATA_IN(0, NULL, 1)),
3242 .offset = 0,
3243 .length = nor->mtd.size,
3244 };
3245 struct spi_mem_op *op = &info.op_tmpl;
3246
3247
3248 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
3249 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
3250 op->dummy.buswidth = op->addr.buswidth;
3251 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
3252
3253
3254 op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
3255
3256 nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3257 &info);
3258 return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
3259}
3260
3261static int spi_nor_create_write_dirmap(struct spi_nor *nor)
3262{
3263 struct spi_mem_dirmap_info info = {
3264 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
3265 SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
3266 SPI_MEM_OP_NO_DUMMY,
3267 SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
3268 .offset = 0,
3269 .length = nor->mtd.size,
3270 };
3271 struct spi_mem_op *op = &info.op_tmpl;
3272
3273
3274 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
3275 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
3276 op->dummy.buswidth = op->addr.buswidth;
3277 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
3278
3279 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
3280 op->addr.nbytes = 0;
3281
3282 nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3283 &info);
3284 return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
3285}
3286
/*
 * spi-mem probe: allocate and identify the flash, then register it as an
 * MTD device.  Returns 0 on success, a negative errno otherwise.
 */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;

	/* Advertise everything; spi_nor_setup() narrows the selection to
	 * what flash and controller actually agree on. */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	/* MTD name: platform data wins, then the spi-mem derived name. */
	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * Pick the name to match the flash_info table against: platform
	 * data type first; a plain "spi-nor" modalias means "unknown,
	 * use JEDEC ID detection only" (NULL); otherwise the modalias
	 * itself names the part.
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL;
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	/*
	 * spi_nor_scan() allocated a PAGE_SIZE bounce buffer before the
	 * page size was known; reallocate it when one full page would not
	 * fit (page writes need a page-sized bounce).
	 */
	if (nor->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->page_size;
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}
3359
3360static int spi_nor_remove(struct spi_mem *spimem)
3361{
3362 struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3363
3364 spi_nor_restore(nor);
3365
3366
3367 return mtd_device_unregister(&nor->mtd);
3368}
3369
/* System shutdown: leave the flash in a state a bootloader can use. */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *flash = spi_mem_get_drvdata(spimem);

	spi_nor_restore(flash);
}
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
/*
 * SPI device id table: modaliases this driver binds to when a device is
 * instantiated without a DT "jedec,spi-nor" compatible.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Generic entry: flash type unknown at bind time, identified
	 * purely by JEDEC ID (see spi_nor_probe()'s NULL flash_name).
	 */
	{"spi-nor"},

	/*
	 * Explicit part-name entries kept so existing board files and
	 * device registrations keep binding — presumably historical
	 * compatibility; do not remove without auditing in-tree users.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/* Everspin MRAMs (non-JEDEC) handled by the same framework. */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* "-nonjedec" variants: parts probed without JEDEC ID readout. */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAM parts. */
	{ "mr25h128" },
	{ "mr25h256" },
	{ "mr25h10" },
	{ "mr25h40" },

	{ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3433
/* Device-tree match table: the generic JEDEC SPI NOR compatible. */
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatible for JEDEC-identifiable flashes; the exact
	 * part is detected at probe time via spi_nor_scan().
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3443
/*
 * Register as a spi-mem driver so the spi-mem layer routes probe/remove/
 * shutdown for matching devices to this framework.
 */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	/* Restore flash state on system shutdown/reboot. */
	.shutdown = spi_nor_shutdown,
};
module_spi_mem_driver(spi_nor_driver);
3462
/* Module metadata. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");
3467