1
2
3
4
5
6
7
8
9
10#include <linux/err.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/mutex.h>
15#include <linux/math64.h>
16#include <linux/sizes.h>
17#include <linux/slab.h>
18
19#include <linux/mtd/mtd.h>
20#include <linux/of_platform.h>
21#include <linux/sched/task_stack.h>
22#include <linux/spi/flash.h>
23#include <linux/mtd/spi-nor.h>
24
25#include "core.h"
26
27
28
29
30
31
32
33#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
34
35
36
37
38
39#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
40
41#define SPI_NOR_MAX_ADDR_WIDTH 4
42
43#define SPI_NOR_SRST_SLEEP_MIN 200
44#define SPI_NOR_SRST_SLEEP_MAX 400
45
46
47
48
49
50
51
52
53
54
55
56
57static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
58 const struct spi_mem_op *op)
59{
60 switch (nor->cmd_ext_type) {
61 case SPI_NOR_EXT_INVERT:
62 return ~op->cmd.opcode;
63
64 case SPI_NOR_EXT_REPEAT:
65 return op->cmd.opcode;
66
67 default:
68 dev_err(nor->dev, "Unknown command extension type\n");
69 return 0;
70 }
71}
72
73
74
75
76
77
78
79
/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 * @proto:	the protocol from which the properties need to be set.
 *
 * Fills in the bus widths of each op phase from @proto, and, for DTR
 * protocols, marks every phase double transfer rate and extends the
 * opcode with its extension byte.
 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	/* Dummy cycles are clocked out on the address lines. */
	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * SPIMEM supports mixed DTR modes, but right now we can only
		 * have all phases either DTR or STR. IOW, we advertise mixed
		 * DTR modes but the core only uses uniform ones.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}
}
117
118
119
120
121
122
123
124
125
126
127
128static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
129{
130
131 if (object_is_on_stack(op->data.buf.in) ||
132 !virt_addr_valid(op->data.buf.in)) {
133 if (op->data.nbytes > nor->bouncebuf_size)
134 op->data.nbytes = nor->bouncebuf_size;
135 op->data.buf.in = nor->bouncebuf;
136 return true;
137 }
138
139 return false;
140}
141
142
143
144
145
146
147
148
149static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
150{
151 int error;
152
153 error = spi_mem_adjust_op_size(nor->spimem, op);
154 if (error)
155 return error;
156
157 return spi_mem_exec_op(nor->spimem, op);
158}
159
/* Read a register through the legacy controller hooks. DTR transfers
 * cannot be expressed by this interface, so reject them. */
static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
					   u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}
168
/* Write a register through the legacy controller hooks. DTR transfers
 * cannot be expressed by this interface, so reject them. */
static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
					    const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->write_reg(nor, opcode, buf, len);
}
177
/* Erase a sector through the legacy controller hooks. DTR transfers
 * cannot be expressed by this interface, so reject them. */
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->write_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}
185
186
187
188
189
190
191
192
193
194
195
/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	/* Copy the read-back bytes out of the DMA-safe bounce buffer. */
	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}
232
233
234
235
236
237
238
239
240
241
242ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
243{
244 if (nor->spimem)
245 return spi_nor_spimem_read_data(nor, from, len, buf);
246
247 return nor->controller_ops->read(nor, from, len, buf);
248}
249
250
251
252
253
254
255
256
257
258
259
/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	/* SST AAI programming sends the address only on the first cycle. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	/* Stage the payload in the DMA-safe bounce buffer if needed. */
	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}
291
292
293
294
295
296
297
298
299
300
301ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
302 const u8 *buf)
303{
304 if (nor->spimem)
305 return spi_nor_spimem_write_data(nor, to, len, buf);
306
307 return nor->controller_ops->write(nor, to, len, buf);
308}
309
310
311
312
313
314
315
/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

	return ret;
}
340
341
342
343
344
345
346
/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_disable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

	return ret;
}
371
372
373
374
375
376
377
378
379
/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *              Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}
414
415
416
417
418
419
420
421
422
423
/**
 * spi_nor_read_fsr() - Read the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'
 * @fsr:	pointer to a DMA-able buffer where the value of the
 *              Flag Status Register will be written. Should be at least 2
 *              bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, fsr, 0));

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading FSR\n", ret);

	return ret;
}
458
459
460
461
462
463
464
465
466
467
/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *              Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, cr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}
492
493
494
495
496
497
498
499
500
/**
 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
						  SPINOR_OP_EN4B :
						  SPINOR_OP_EX4B,
						  0),
				  SPI_MEM_OP_NO_ADDR,
				  SPI_MEM_OP_NO_DUMMY,
				  SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ? SPINOR_OP_EN4B :
								SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}
530
531
532
533
534
535
536
537
538
539
/**
 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
 * flashes by writing the EXTADD bit (bit 7) of the Bank Address Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}
566
567
568
569
570
571
572
573
/**
 * spi_nor_write_ear() - Write Extended Address Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @ear:	value to write to the Extended Address Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
	int ret;

	nor->bouncebuf[0] = ear;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d writing EAR\n", ret);

	return ret;
}
600
601
602
603
604
605
606
607
608
/**
 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *              Status Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);

	return ret;
}
633
634
635
636
637
638
639
640
641static int spi_nor_xsr_ready(struct spi_nor *nor)
642{
643 int ret;
644
645 ret = spi_nor_xread_sr(nor, nor->bouncebuf);
646 if (ret)
647 return ret;
648
649 return !!(nor->bouncebuf[0] & XSR_RDY);
650}
651
652
653
654
655
/**
 * spi_nor_clear_sr() - Clear the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Best effort: a failure is only logged, not propagated.
 */
static void spi_nor_clear_sr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}
678
679
680
681
682
683
684
685
/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_sr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	/* Flashes with CLSR latch program/erase failures in the SR. */
	if (nor->flags & SNOR_F_USE_CLSR &&
	    nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
		if (nor->bouncebuf[0] & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		spi_nor_clear_sr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !(nor->bouncebuf[0] & SR_WIP);
}
717
718
719
720
721
/**
 * spi_nor_clear_fsr() - Clear the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Best effort: a failure is only logged, not propagated.
 */
static void spi_nor_clear_fsr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}
744
745
746
747
748
749
750
751
/**
 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
 * ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_fsr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
		if (nor->bouncebuf[0] & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (nor->bouncebuf[0] & FSR_PT_ERR)
			dev_err(nor->dev,
				"Attempted to modify a protected sector.\n");

		spi_nor_clear_fsr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !!(nor->bouncebuf[0] & FSR_READY);
}
786
787
788
789
790
791
792
793static int spi_nor_ready(struct spi_nor *nor)
794{
795 int sr, fsr;
796
797 if (nor->flags & SNOR_F_READY_XSR_RDY)
798 sr = spi_nor_xsr_ready(nor);
799 else
800 sr = spi_nor_sr_ready(nor);
801 if (sr < 0)
802 return sr;
803 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
804 if (fsr < 0)
805 return fsr;
806 return sr && fsr;
807}
808
809
810
811
812
813
814
815
816
/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Note the readiness poll deliberately happens once more after the deadline
 * has passed, so a flash that becomes ready on the last check still wins.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}
842
843
844
845
846
847
848
849
/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}
855
856
857
858
859
860
861
/**
 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_global_block_unlock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_GBULK, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
						       NULL, 0);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}
892
893
894
895
896
897
898
899
900
/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to DMA-able buffer to write to the Status Register.
 * @len:	number of bytes to write to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(len, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
						       len);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}
931
932
933
934
935
936
937
938
939
/**
 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
 * ensure that the byte written match the received value.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;

	nor->bouncebuf[0] = sr1;

	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
	if (ret)
		return ret;

	/* Read back and verify the write actually took effect. */
	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] != sr1) {
		dev_dbg(nor->dev, "SR1: read back test failed\n");
		return -EIO;
	}

	return 0;
}
961
962
963
964
965
966
967
968
969
970
971
/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 match the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Keep the current value of the Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * When the Quad Enable method is set, the default value
		 * of the QE bit is supposed to be set: force it here.
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	/* Cannot verify without a readable CR. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register match the received value, and that the 16-bit Write
 * did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	/* Verify SR1 survived the 16-bit write. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	/* Verify CR holds the requested value. */
	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
1090{
1091 if (nor->flags & SNOR_F_HAS_16BIT_SR)
1092 return spi_nor_write_16bit_sr_and_check(nor, sr1);
1093
1094 return spi_nor_write_sr1_and_check(nor, sr1);
1095}
1096
1097
1098
1099
1100
1101
1102
1103
1104
/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, sr2, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
						       sr2, 1);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
/**
 * spi_nor_read_sr2() - Read the Status Register 2 using the
 * SPINOR_OP_RDSR2 (3fh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer where the value of the
 *		Status Register 2 will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr2, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR2\n", ret);

	return ret;
}
1169
1170
1171
1172
1173
1174
1175
/**
 * spi_nor_erase_chip() - Erase the entire flash memory.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Write Enable must have been issued by the caller; completion must be
 * waited for by the caller too.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_chip(struct spi_nor *nor)
{
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}
1203
1204static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
1205{
1206 size_t i;
1207
1208 for (i = 0; i < size; i++)
1209 if (table[i][0] == opcode)
1210 return table[i][1];
1211
1212
1213 return opcode;
1214}
1215
/* Map a 3-byte-address read opcode to its 4-byte-address equivalent. */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
1236
/* Map a 3-byte-address program opcode to its 4-byte-address equivalent. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
1250
/* Map a 3-byte-address erase opcode to its 4-byte-address equivalent. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
1262
1263static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1264{
1265 return !!nor->params->erase_map.uniform_erase_type;
1266}
1267
/* Switch the read/program/erase opcodes to their 4-byte-address variants,
 * including every erase type of a non-uniform erase map. */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}
1286
1287int spi_nor_lock_and_prep(struct spi_nor *nor)
1288{
1289 int ret = 0;
1290
1291 mutex_lock(&nor->lock);
1292
1293 if (nor->controller_ops && nor->controller_ops->prepare) {
1294 ret = nor->controller_ops->prepare(nor);
1295 if (ret) {
1296 mutex_unlock(&nor->lock);
1297 return ret;
1298 }
1299 }
1300 return ret;
1301}
1302
/* Undo spi_nor_lock_and_prep(): unprepare the controller (if it has a hook)
 * and drop the driver mutex. */
void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
	mutex_unlock(&nor->lock);
}
1309
1310static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1311{
1312 if (!nor->params->convert_addr)
1313 return addr;
1314
1315 return nor->params->convert_addr(nor, addr);
1316}
1317
1318
1319
1320
/*
 * Initiate the erasure of a single sector. Returns 0 if the operation was
 * started; the caller is responsible for Write Enable and for waiting for
 * completion.
 */
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	addr = spi_nor_convert_addr(nor, addr);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control: serialize the address big-endian into the bounce buffer
	 * and send it with the erase opcode through the write_reg hook.
	 */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_width);
}
1353
1354
1355
1356
1357
1358
1359
1360
1361
/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase. The region in
 *				    which the address fits is expected to be
 *				    provided.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0: walk from the largest down.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Alignment is not mandatory for overlaid regions */
		if (region->offset & SNOR_OVERLAID_REGION &&
		    region->size <= len)
			return erase;

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}
1420
/* Non-zero when @region is the last region of the erase map. */
static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
	return region->offset & SNOR_LAST_REGION;
}
1425
/* End offset (exclusive) of @region; the flag bits encoded in the low bits
 * of ->offset are masked out first. */
static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
	return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}
1430
1431
1432
1433
1434
1435
1436
1437struct spi_nor_erase_region *
1438spi_nor_region_next(struct spi_nor_erase_region *region)
1439{
1440 if (spi_nor_region_is_last(region))
1441 return NULL;
1442 region++;
1443 return region;
1444}
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
/**
 * spi_nor_find_erase_region() - find the region of the serial flash memory in
 *				 which the offset fits
 * @map:	the erase map of the SPI NOR
 * @addr:	offset in the serial flash memory
 *
 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	/* Walk the region list until @addr falls inside one. */
	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}
1473
1474
1475
1476
1477
1478
1479
1480
1481
/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *	   otherwise. The caller owns the allocation and must kfree() it.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	/* An overlaid region is erased in one shot, whatever its size. */
	if (region->offset & SNOR_OVERLAID_REGION)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}
1503
1504
1505
1506
1507
1508static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1509{
1510 struct spi_nor_erase_command *cmd, *next;
1511
1512 list_for_each_entry_safe(cmd, next, erase_list, list) {
1513 list_del(&cmd->list);
1514 kfree(cmd);
1515 }
1516}
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed. On failure the partially built list is destroyed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/*
		 * Start a new command when the erase type changes, its size
		 * differs from the running command's, or the region is
		 * overlaid; otherwise just bump the repeat count.
		 * (prev_erase == NULL on the first pass, so cmd is never
		 * dereferenced before being allocated.)
		 */
		if (prev_erase != erase ||
		    erase->size != cmd->size ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Advance to the next region when this one is consumed. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
				 cmd->size, cmd->opcode, cmd->count);

			ret = spi_nor_write_enable(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;
		}
		/* Command fully executed: release it. */
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
1639
1640
1641
1642
1643
/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Uniform-erase flashes only accept whole-sector lengths. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto erase_err;

		ret = spi_nor_erase_chip(nor);
		if (ret)
			goto erase_err;

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor);

	return ret;
}
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1741{
1742 int ret;
1743
1744 ret = spi_nor_read_sr(nor, nor->bouncebuf);
1745 if (ret)
1746 return ret;
1747
1748 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1749 return 0;
1750
1751 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1752
1753 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1754}
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
1766{
1767 int ret;
1768
1769 if (nor->flags & SNOR_F_NO_READ_CR)
1770 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
1771
1772 ret = spi_nor_read_cr(nor, nor->bouncebuf);
1773 if (ret)
1774 return ret;
1775
1776 if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
1777 return 0;
1778
1779 nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
1780
1781 return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
1782}
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2. The write is
 * performed with the dedicated SR2 command so the other status bits are left
 * untouched, and the value is read back to verify it stuck.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}
1830
/*
 * Table of all supported manufacturer drivers, each providing its flash_info
 * part list and fixups. Scanned in order by spi_nor_read_id() and
 * spi_nor_match_id().
 */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_catalyst,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_fujitsu,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
};
1850
1851static const struct flash_info *
1852spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
1853 const u8 *id)
1854{
1855 unsigned int i;
1856
1857 for (i = 0; i < nparts; i++) {
1858 if (parts[i].id_len &&
1859 !memcmp(parts[i].id, id, parts[i].id_len))
1860 return &parts[i];
1861 }
1862
1863 return NULL;
1864}
1865
/*
 * Read the JEDEC ID with the RDID (9Fh) command and look the flash up in the
 * manufacturer part tables. Returns the matching flash_info, or an ERR_PTR on
 * read failure / unknown ID.
 */
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	unsigned int i;
	int ret;

	if (nor->spimem) {
		/* spi-mem path: 1-1-1 RDID op, no address, no dummy. */
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controller path. */
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* Ask each manufacturer driver in turn; the first match wins. */
	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		info = spi_nor_search_part_by_id(manufacturers[i]->parts,
						 manufacturers[i]->nparts,
						 id);
		if (info) {
			nor->manufacturer = manufacturers[i];
			return info;
		}
	}

	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
		SPI_NOR_MAX_ID_LEN, id);
	return ERR_PTR(-ENODEV);
}
1904
/*
 * mtd read handler: read @len bytes starting at @from, looping because
 * spi_nor_read_data() may return fewer bytes than requested.
 */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		/* Account for banked/stacked die address translation. */
		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
1943
1944
1945
1946
1947
1948
/*
 * Write an address range to the nor chip. Data must be written in
 * page-sized chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 */
		if (is_power_of_2(nor->page_size)) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			page_offset = do_div(aux, nor->page_size);
		}

		/* The size of data remaining on the first page. */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		/* Account for banked/stacked die address translation. */
		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		/* The flash must finish programming before the next chunk. */
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
2004
2005static int spi_nor_check(struct spi_nor *nor)
2006{
2007 if (!nor->dev ||
2008 (!nor->spimem && !nor->controller_ops) ||
2009 (!nor->spimem && nor->controller_ops &&
2010 (!nor->controller_ops->read ||
2011 !nor->controller_ops->write ||
2012 !nor->controller_ops->read_reg ||
2013 !nor->controller_ops->write_reg))) {
2014 pr_err("spi-nor: please fill all the necessary fields!\n");
2015 return -EINVAL;
2016 }
2017
2018 if (nor->spimem && nor->controller_ops) {
2019 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2020 return -EINVAL;
2021 }
2022
2023 return 0;
2024}
2025
2026void
2027spi_nor_set_read_settings(struct spi_nor_read_command *read,
2028 u8 num_mode_clocks,
2029 u8 num_wait_states,
2030 u8 opcode,
2031 enum spi_nor_protocol proto)
2032{
2033 read->num_mode_clocks = num_mode_clocks;
2034 read->num_wait_states = num_wait_states;
2035 read->opcode = opcode;
2036 read->proto = proto;
2037}
2038
2039void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
2040 enum spi_nor_protocol proto)
2041{
2042 pp->opcode = opcode;
2043 pp->proto = proto;
2044}
2045
2046static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2047{
2048 size_t i;
2049
2050 for (i = 0; i < size; i++)
2051 if (table[i][0] == (int)hwcaps)
2052 return table[i][1];
2053
2054 return -EINVAL;
2055}
2056
/* Map a SNOR_HWCAPS_READ_* bit to the matching SNOR_CMD_READ_* index. */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2081
/* Map a SNOR_HWCAPS_PP_* bit to the matching SNOR_CMD_PP_* index. */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2098
2099
2100
2101
2102
2103
2104
2105
2106
/**
 * spi_nor_spimem_check_op - check if the operation is supported
 *                           by controller
 *@nor:        pointer to a 'struct spi_nor'
 *@op:         pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	/*
	 * First test with 4 address bytes. The opcode itself might be a 3B
	 * addressing opcode but we don't care, because SPI controller
	 * implementation should not check the opcode, but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		if (nor->mtd.size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient. */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}
2129
2130
2131
2132
2133
2134
2135
2136
2137
/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 *                               by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@read:        pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_DUMMY(1, 0),
					  SPI_MEM_OP_DATA_IN(1, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}
2155
2156
2157
2158
2159
2160
2161
2162
2163
/**
 * spi_nor_spimem_check_pp - check if the page program op is supported
 *                           by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@pp:          pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(1, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}
2176
2177
2178
2179
2180
2181
2182
2183
/**
 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
 *                                based on SPI controller capabilities
 * @nor:        pointer to a 'struct spi_nor'
 * @hwcaps:     pointer to resulting capabilities after adjusting
 *              according to controller and flash's capability
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	unsigned int cap;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * If the reset line is broken, we do not want to enter a stateful
	 * mode.
	 */
	if (nor->flags & SNOR_F_BROKEN_RESET)
		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		/* Drop read caps the controller cannot actually execute. */
		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		/* Same for page program caps. */
		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}
2220
2221
2222
2223
2224
2225
2226
2227void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2228 u8 opcode)
2229{
2230 erase->size = size;
2231 erase->opcode = opcode;
2232
2233 erase->size_shift = ffs(erase->size) - 1;
2234 erase->size_mask = (1 << erase->size_shift) - 1;
2235}
2236
2237
2238
2239
2240
2241
2242
2243
/**
 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
 * @map:		the erase map of the SPI NOR
 * @erase_mask:		bitmask encoding erase types that can erase the entire
 *			flash memory
 * @flash_size:		the spi nor flash memory size
 */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
				    u8 erase_mask, u64 flash_size)
{
	/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set. */
	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
				     SNOR_LAST_REGION;
	map->uniform_region.size = flash_size;
	map->regions = &map->uniform_region;
	map->uniform_erase_type = erase_mask;
}
2254
2255int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2256 const struct sfdp_parameter_header *bfpt_header,
2257 const struct sfdp_bfpt *bfpt)
2258{
2259 int ret;
2260
2261 if (nor->manufacturer && nor->manufacturer->fixups &&
2262 nor->manufacturer->fixups->post_bfpt) {
2263 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2264 bfpt);
2265 if (ret)
2266 return ret;
2267 }
2268
2269 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2270 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);
2271
2272 return 0;
2273}
2274
/* Pick the best read command supported by both controller and flash. */
static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	/* Highest set capability bit is the fastest supported read. */
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * In the SPI NOR framework, we don't need to make the difference
	 * between mode clock cycles and wait state clock cycles.
	 * Indeed, the value of the mode clock cycles is used by a QSPI
	 * flash memory to know whether it should enter or leave its 0-4-4
	 * (Continuous Read / XIP) mode.
	 * eXecution In Place is out of the scope of the mtd sub-system.
	 * Hence we choose to merge both mode and wait state clock cycles
	 * into the so called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}
2305
2306static int spi_nor_select_pp(struct spi_nor *nor,
2307 u32 shared_hwcaps)
2308{
2309 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2310 const struct spi_nor_pp_command *pp;
2311
2312 if (best_match < 0)
2313 return -EINVAL;
2314
2315 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2316 if (cmd < 0)
2317 return -EINVAL;
2318
2319 pp = &nor->params->page_programs[cmd];
2320 nor->program_opcode = pp->opcode;
2321 nor->write_proto = pp->proto;
2322 return 0;
2323}
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
/**
 * spi_nor_select_uniform_erase() - select optimum uniform erase type
 * @map:		the erase map of the SPI NOR
 * @wanted_size:	the erase type size to search for
 *
 * Once the optimum uniform sector erase command is found, disable all the
 * other.
 *
 * Return: pointer to erase type on success, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
			     const u32 wanted_size)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_erase_type;

	/* Erase types are ordered by size, with the biggest in last place. */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/*
		 * If the current erase size is the one, stop here:
		 * we have found the right uniform Sector Erase command.
		 */
		if (tested_erase->size == wanted_size) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise, the current erase size is still a valid candidate.
		 * Select the biggest valid candidate.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;
			/* keep iterating to find the wanted_size */
	}

	if (!erase)
		return NULL;

	/* Disable all other Sector Erase commands. */
	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
	map->uniform_erase_type |= BIT(erase - map->erase_type);
	return erase;
}
2377
/* Select the erase opcode/size the mtd layer will use. */
static int spi_nor_select_erase(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	u32 wanted_size = nor->info->sector_size;
	int i;

	/*
	 * The previous implementation handling Sector Erase commands assumed
	 * that the SPI flash memory has an uniform layout then used only one
	 * of the supported erase sizes for all Sector Erase commands.
	 * So to be backward compatible, the new implementation also tries to
	 * manage the SPI flash memory as uniform with a single erase sector
	 * size, when possible.
	 */
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	wanted_size = 4096u;
#endif

	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map, wanted_size);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * For non-uniform SPI flash memory, set mtd->erasesize to the
	 * maximum erase sector size. No need to set nor->erase_opcode.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}
2425
/*
 * Default ->setup hook: pick read, page program and erase settings that both
 * the SPI controller and the flash support.
 */
static int spi_nor_default_setup(struct spi_nor *nor,
				 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/*
		 * When called from spi_nor_probe(), all caps are set and we
		 * need to discard some of them based on what the SPI
		 * controller actually supports (using spi_mem_supports_op()).
		 */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * SPI n-n-n protocols are not supported when the SPI
		 * controller directly implements the spi_nor interface.
		 * Yet another reason to switch to spi-mem.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return 0;
}
2486
2487static int spi_nor_setup(struct spi_nor *nor,
2488 const struct spi_nor_hwcaps *hwcaps)
2489{
2490 if (!nor->params->setup)
2491 return 0;
2492
2493 return nor->params->setup(nor, hwcaps);
2494}
2495
2496
2497
2498
2499
2500
2501static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2502{
2503 if (nor->manufacturer && nor->manufacturer->fixups &&
2504 nor->manufacturer->fixups->default_init)
2505 nor->manufacturer->fixups->default_init(nor);
2506
2507 if (nor->info->fixups && nor->info->fixups->default_init)
2508 nor->info->fixups->default_init(nor);
2509}
2510
2511
2512
2513
2514
2515
2516
2517
2518
/**
 * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
 * based on JESD216 SFDP standard.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
 * legacy flash parameters and settings will be restored.
 */
static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	/* Snapshot the legacy parameters before SFDP overwrites them. */
	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor)) {
		/* Parsing failed: restore the snapshot and SFDP side effects. */
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
		nor->addr_width = 0;
		nor->flags &= ~SNOR_F_4B_OPCODES;
	}
}
2531
2532
2533
2534
2535
2536
/**
 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
 * based on nor->info data.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Initialize default flash parameters and settings. */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
	params->setup = spi_nor_default_setup;
	params->otp.org = &info->otp_org;

	/* Default to 16-bit Write Status (01h) Command. */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
2656{
2657 if (nor->manufacturer && nor->manufacturer->fixups &&
2658 nor->manufacturer->fixups->post_sfdp)
2659 nor->manufacturer->fixups->post_sfdp(nor);
2660
2661 if (nor->info->fixups && nor->info->fixups->post_sfdp)
2662 nor->info->fixups->post_sfdp(nor);
2663}
2664
2665
2666
2667
2668
2669
2670
2671
2672static void spi_nor_late_init_params(struct spi_nor *nor)
2673{
2674
2675
2676
2677
2678 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
2679 spi_nor_init_default_locking_ops(nor);
2680}
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
/**
 * spi_nor_init_params() - Initialize the flash's parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The flash parameters and settings are initialized based on a sequence of
 * calls that are ordered by priority (each later step may override the
 * previous ones):
 * 1/ legacy defaults from nor->info,
 * 2/ manufacturer driver default_init fixups,
 * 3/ SFDP tables (when the flash advertises fast read modes),
 * 4/ post-SFDP fixups to correct wrong SFDP data,
 * 5/ late defaults that depend on everything above.
 *
 * Return: 0 on success, -ENOMEM if the parameter struct cannot be allocated.
 */
static int spi_nor_init_params(struct spi_nor *nor)
{
	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
	if (!nor->params)
		return -ENOMEM;

	spi_nor_info_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	/* Only parse SFDP when the flash declares a multi-I/O read mode. */
	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
				 SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
		spi_nor_sfdp_init_params(nor);

	spi_nor_post_sfdp_fixups(nor);

	spi_nor_late_init_params(nor);

	return 0;
}
2740
2741
2742
2743
2744
2745
2746
/**
 * spi_nor_octal_dtr_enable() - enable or disable Octal DTR I/O.
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	whether to enable or disable Octal DTR
 *
 * Only acts when the flash provides an octal_dtr_enable hook, both read and
 * write protocols are 8D-8D-8D, and the I/O mode is enabled through volatile
 * registers.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
{
	int ret;

	if (!nor->params->octal_dtr_enable)
		return 0;

	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
		return 0;

	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->octal_dtr_enable(nor, enable);
	if (ret)
		return ret;

	/* Register accesses now follow the selected I/O mode. */
	if (enable)
		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
	else
		nor->reg_proto = SNOR_PROTO_1_1_1;

	return 0;
}
2772
2773
2774
2775
2776
2777
2778
2779static int spi_nor_quad_enable(struct spi_nor *nor)
2780{
2781 if (!nor->params->quad_enable)
2782 return 0;
2783
2784 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
2785 spi_nor_get_protocol_width(nor->write_proto) == 4))
2786 return 0;
2787
2788 return nor->params->quad_enable(nor);
2789}
2790
/* Put the flash into its operational state: I/O modes, unlock, 4-byte mode. */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_octal_dtr_enable(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a
	 * power-on reset cycle. Depending on the kernel configuration,
	 * either do nothing, always unlock the entire flash array, or
	 * unlock it only when the software write protection bits are
	 * volatile (SNOR_F_SWP_IS_VOLATILE).
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	if (nor->addr_width == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		nor->params->set_4byte_addr_mode(nor, true);
	}

	return 0;
}
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
/**
 * spi_nor_soft_reset() - Perform a software reset
 * @nor:	pointer to 'struct spi_nor'
 *
 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which
 * returns the device to its power-on-reset state. Useful when the software
 * has changed volatile device registers (e.g. address mode) and needs to
 * reset it before shutting down. Failures are only logged because there is
 * no caller that can recover from them.
 */
static void spi_nor_soft_reset(struct spi_nor *nor)
{
	struct spi_mem_op op;
	int ret;

	/* Step 1: Software Reset Enable. */
	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
			SPI_MEM_OP_NO_DUMMY,
			SPI_MEM_OP_NO_ADDR,
			SPI_MEM_OP_NO_DATA);

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/* Step 2: Software Reset. */
	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
			SPI_MEM_OP_NO_DUMMY,
			SPI_MEM_OP_NO_ADDR,
			SPI_MEM_OP_NO_DATA);

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/*
	 * Give the flash time to recover from the reset before it can accept
	 * further commands.
	 */
	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}
2893
2894
2895static int spi_nor_suspend(struct mtd_info *mtd)
2896{
2897 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2898 int ret;
2899
2900
2901 ret = spi_nor_octal_dtr_enable(nor, false);
2902 if (ret)
2903 dev_err(nor->dev, "suspend() failed\n");
2904
2905 return ret;
2906}
2907
2908
2909static void spi_nor_resume(struct mtd_info *mtd)
2910{
2911 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2912 struct device *dev = nor->dev;
2913 int ret;
2914
2915
2916 ret = spi_nor_init(nor);
2917 if (ret)
2918 dev_err(dev, "resume() failed\n");
2919}
2920
2921static int spi_nor_get_device(struct mtd_info *mtd)
2922{
2923 struct mtd_info *master = mtd_get_master(mtd);
2924 struct spi_nor *nor = mtd_to_spi_nor(master);
2925 struct device *dev;
2926
2927 if (nor->spimem)
2928 dev = nor->spimem->spi->controller->dev.parent;
2929 else
2930 dev = nor->dev;
2931
2932 if (!try_module_get(dev->driver->owner))
2933 return -ENODEV;
2934
2935 return 0;
2936}
2937
2938static void spi_nor_put_device(struct mtd_info *mtd)
2939{
2940 struct mtd_info *master = mtd_get_master(mtd);
2941 struct spi_nor *nor = mtd_to_spi_nor(master);
2942 struct device *dev;
2943
2944 if (nor->spimem)
2945 dev = nor->spimem->spi->controller->dev.parent;
2946 else
2947 dev = nor->dev;
2948
2949 module_put(dev->driver->owner);
2950}
2951
/*
 * spi_nor_restore() - restore the flash to a state a bootloader can cope
 * with: leave 4-byte address mode on broken-reset boards and, when requested,
 * issue a software reset.
 */
void spi_nor_restore(struct spi_nor *nor)
{
	/* restore the addressing mode */
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET)
		nor->params->set_4byte_addr_mode(nor, false);

	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
2963
2964static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
2965 const char *name)
2966{
2967 unsigned int i, j;
2968
2969 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
2970 for (j = 0; j < manufacturers[i]->nparts; j++) {
2971 if (!strcmp(name, manufacturers[i]->parts[j].name)) {
2972 nor->manufacturer = manufacturers[i];
2973 return &manufacturers[i]->parts[j];
2974 }
2975 }
2976 }
2977
2978 return NULL;
2979}
2980
2981static int spi_nor_set_addr_width(struct spi_nor *nor)
2982{
2983 if (nor->addr_width) {
2984
2985 } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998 nor->addr_width = 4;
2999 } else if (nor->info->addr_width) {
3000 nor->addr_width = nor->info->addr_width;
3001 } else {
3002 nor->addr_width = 3;
3003 }
3004
3005 if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
3006
3007 nor->addr_width = 4;
3008 }
3009
3010 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
3011 dev_dbg(nor->dev, "address width is too large: %u\n",
3012 nor->addr_width);
3013 return -EINVAL;
3014 }
3015
3016
3017 if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
3018 !(nor->flags & SNOR_F_HAS_4BAIT))
3019 spi_nor_set_4byte_opcodes(nor);
3020
3021 return 0;
3022}
3023
3024static void spi_nor_debugfs_init(struct spi_nor *nor,
3025 const struct flash_info *info)
3026{
3027 struct mtd_info *mtd = &nor->mtd;
3028
3029 mtd->dbg.partname = info->name;
3030 mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
3031 info->id_len, info->id);
3032}
3033
/*
 * Resolve the flash_info for this device: try the platform/DT-provided name
 * first, fall back to JEDEC auto-detection, and cross-check a named part
 * against the JEDEC ID when both are available.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(nor, name);
	/* Try to auto-detect if chip name wasn't specified or not found. */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}
3073
/**
 * spi_nor_scan() - identify the attached flash and populate the MTD interface
 * @nor:	pointer to the SPI NOR structure to initialize
 * @name:	optional flash part name; NULL means identify purely by
 *		reading the JEDEC ID
 * @hwcaps:	hardware capabilities supported by the controller driver
 *
 * Identifies the flash, initializes its parameters (flash_info flags and
 * SFDP), fills in the embedded mtd_info and its operation hooks, and sends
 * the commands needed to bring the device into a usable state.
 *
 * NOTE: the ordering below is significant — params must be initialized
 * before any nor->params dereference, spi_nor_setup() must run before the
 * address width is fixed, and spi_nor_init() must come last.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Start from plain 1-1-1 SPI for every command class. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * Allocate the bounce buffer early: it is already needed for the
	 * register reads done during identification.  PAGE_SIZE is enough
	 * for now; spi_nor_probe() re-allocates it if the flash page size
	 * turns out to be larger.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Flags consumed during parameter init must be set before
	 * spi_nor_init_params() runs.
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	mtd->_write = spi_nor_write;

	/* Init flash parameters based on flash_info struct and SFDP tables. */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = nor->params->writesize;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = nor->params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;

	/* Translate the remaining flash_info flags into runtime SNOR_F_* flags. */
	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (info->flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;
	if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (info->flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (info->flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = nor->params->page_size;
	mtd->writebufsize = nor->page_size;

	/* DT opt-out for boards whose flash cannot be reset reliably. */
	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Select the opcodes/protocols: intersect the controller's hwcaps
	 * with what the flash parameters advertise.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;

	/* Address width depends on the opcodes chosen by spi_nor_setup(). */
	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	spi_nor_register_locking_ops(nor);

	/* Send all the required SPI flash commands to initialize the device. */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* Configure OTP parameters and ops. */
	spi_nor_otp_init(nor);

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
		 (long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
3232
3233static int spi_nor_create_read_dirmap(struct spi_nor *nor)
3234{
3235 struct spi_mem_dirmap_info info = {
3236 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
3237 SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
3238 SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
3239 SPI_MEM_OP_DATA_IN(0, NULL, 0)),
3240 .offset = 0,
3241 .length = nor->mtd.size,
3242 };
3243 struct spi_mem_op *op = &info.op_tmpl;
3244
3245 spi_nor_spimem_setup_op(nor, op, nor->read_proto);
3246
3247
3248 op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
3249 if (spi_nor_protocol_is_dtr(nor->read_proto))
3250 op->dummy.nbytes *= 2;
3251
3252
3253
3254
3255
3256
3257 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
3258
3259 nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3260 &info);
3261 return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
3262}
3263
3264static int spi_nor_create_write_dirmap(struct spi_nor *nor)
3265{
3266 struct spi_mem_dirmap_info info = {
3267 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
3268 SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
3269 SPI_MEM_OP_NO_DUMMY,
3270 SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
3271 .offset = 0,
3272 .length = nor->mtd.size,
3273 };
3274 struct spi_mem_op *op = &info.op_tmpl;
3275
3276 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
3277 op->addr.nbytes = 0;
3278
3279 spi_nor_spimem_setup_op(nor, op, nor->write_proto);
3280
3281
3282
3283
3284
3285
3286 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
3287
3288 nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3289 &info);
3290 return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
3291}
3292
3293static int spi_nor_probe(struct spi_mem *spimem)
3294{
3295 struct spi_device *spi = spimem->spi;
3296 struct flash_platform_data *data = dev_get_platdata(&spi->dev);
3297 struct spi_nor *nor;
3298
3299
3300
3301
3302 const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
3303 char *flash_name;
3304 int ret;
3305
3306 nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
3307 if (!nor)
3308 return -ENOMEM;
3309
3310 nor->spimem = spimem;
3311 nor->dev = &spi->dev;
3312 spi_nor_set_flash_node(nor, spi->dev.of_node);
3313
3314 spi_mem_set_drvdata(spimem, nor);
3315
3316 if (data && data->name)
3317 nor->mtd.name = data->name;
3318
3319 if (!nor->mtd.name)
3320 nor->mtd.name = spi_mem_get_name(spimem);
3321
3322
3323
3324
3325
3326
3327
3328 if (data && data->type)
3329 flash_name = data->type;
3330 else if (!strcmp(spi->modalias, "spi-nor"))
3331 flash_name = NULL;
3332 else
3333 flash_name = spi->modalias;
3334
3335 ret = spi_nor_scan(nor, flash_name, &hwcaps);
3336 if (ret)
3337 return ret;
3338
3339
3340
3341
3342
3343
3344 if (nor->page_size > PAGE_SIZE) {
3345 nor->bouncebuf_size = nor->page_size;
3346 devm_kfree(nor->dev, nor->bouncebuf);
3347 nor->bouncebuf = devm_kmalloc(nor->dev,
3348 nor->bouncebuf_size,
3349 GFP_KERNEL);
3350 if (!nor->bouncebuf)
3351 return -ENOMEM;
3352 }
3353
3354 ret = spi_nor_create_read_dirmap(nor);
3355 if (ret)
3356 return ret;
3357
3358 ret = spi_nor_create_write_dirmap(nor);
3359 if (ret)
3360 return ret;
3361
3362 return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
3363 data ? data->nr_parts : 0);
3364}
3365
3366static int spi_nor_remove(struct spi_mem *spimem)
3367{
3368 struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3369
3370 spi_nor_restore(nor);
3371
3372
3373 return mtd_device_unregister(&nor->mtd);
3374}
3375
/* spi-mem shutdown hook: restore the flash state before power-off/reboot. */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *flash = spi_mem_get_drvdata(spimem);

	spi_nor_restore(flash);
}
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
/*
 * SPI device ID table.  Matches the generic "spi-nor" modalias plus a set
 * of legacy part-name modaliases kept for compatibility with existing
 * board files and device trees.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Generic entry: identification is done entirely via the JEDEC ID
	 * (spi_nor_probe() passes a NULL name for this modalias).
	 */
	{"spi-nor"},

	/* Legacy per-part entries kept so old platforms keep binding. */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/* Entries matching entries in the flash_info tables. */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Non-JEDEC variants that skip ID verification. */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAM parts historically handled by this driver. */
	{ "mr25h128" },
	{ "mr25h256" },
	{ "mr25h10" },
	{ "mr25h40" },

	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3439
/*
 * Device-tree match table.  Boards describe any JEDEC-identifiable SPI NOR
 * with the generic "jedec,spi-nor" compatible; the concrete part is then
 * detected at runtime from its ID.
 */
static const struct of_device_id spi_nor_of_table[] = {
	{ .compatible = "jedec,spi-nor" },
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3449
3450
3451
3452
3453
3454
/*
 * spi-mem driver glue: binds via the OF table or the legacy spi_device_id
 * table above and routes probe/remove/shutdown into this framework.
 */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
			.dev_groups = spi_nor_sysfs_groups,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};
module_spi_mem_driver(spi_nor_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");
3474