1
2
3
4
5
6
7
8
9
10#include <linux/err.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/mutex.h>
15#include <linux/math64.h>
16#include <linux/sizes.h>
17#include <linux/slab.h>
18
19#include <linux/mtd/mtd.h>
20#include <linux/of_platform.h>
21#include <linux/sched/task_stack.h>
22#include <linux/spi/flash.h>
23#include <linux/mtd/spi-nor.h>
24
25#include "core.h"
26
27
28
29
30
31
32
/* Default timeout waiting for the flash to report ready. */
#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now.  Full-chip erase scales this per 2MiB of flash.
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)

#define SPI_NOR_MAX_ADDR_WIDTH 4

/* Sleep range (us) after issuing a software reset, before the flash is ready. */
#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400
45
46
47
48
49
50
51
52
53
54
55
56
57static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
58 const struct spi_mem_op *op)
59{
60 switch (nor->cmd_ext_type) {
61 case SPI_NOR_EXT_INVERT:
62 return ~op->cmd.opcode;
63
64 case SPI_NOR_EXT_REPEAT:
65 return op->cmd.opcode;
66
67 default:
68 dev_err(nor->dev, "Unknown command extension type\n");
69 return 0;
70 }
71}
72
73
74
75
76
77
78
79
/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 * @proto:	the protocol from which the properties need to be set.
 *
 * Derives each phase's bus width from @proto; a phase with 0 bytes is left
 * untouched. For DTR protocols, all phases are marked DTR, the dummy byte
 * count is doubled (two bytes transferred per cycle), and the opcode is
 * extended to two bytes with the command extension byte.
 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	/* The dummy phase inherits the address-phase bus width. */
	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * All phases are driven DTR here; mixed STR/DTR phase
		 * combinations are not produced by this helper.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}
}
117
118
119
120
121
122
123
124
125
126
127
/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *                           transfer
 * @nor:        pointer to 'struct spi_nor'
 * @op:         pointer to 'struct spi_mem_op' template for transfer
 *
 * If the data buffer lives on the stack or is not DMA-able, redirect the
 * transfer through nor->bouncebuf, clamping the length to the bounce buffer
 * size. The data field in @op is updated in that case.
 *
 * Return: true if the bounce buffer is being used, false if not
 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
	/* op->data.buf.in occupies the same memory as op->data.buf.out */
	if (object_is_on_stack(op->data.buf.in) ||
	    !virt_addr_valid(op->data.buf.in)) {
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;
		op->data.buf.in = nor->bouncebuf;
		return true;
	}

	return false;
}
141
142
143
144
145
146
147
148
149static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
150{
151 int error;
152
153 error = spi_mem_adjust_op_size(nor->spimem, op);
154 if (error)
155 return error;
156
157 return spi_mem_exec_op(nor->spimem, op);
158}
159
/*
 * Read a register through the legacy controller_ops path. DTR register
 * protocols cannot be expressed through this interface, so reject them.
 */
static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
					   u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}
168
/*
 * Write a register through the legacy controller_ops path. DTR register
 * protocols cannot be expressed through this interface, so reject them.
 */
static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
					    const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->write_reg(nor, opcode, buf, len);
}
177
/*
 * Erase a sector at @offs through the legacy controller_ops path. DTR write
 * protocols cannot be expressed through this interface, so reject them.
 */
static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->write_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}
185
186
187
188
189
190
191
192
193
194
195
/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/*
	 * Convert the dummy cycle count (nor->read_dummy) into bytes at the
	 * dummy-phase bus width; DTR moves two bytes per cycle.
	 */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	/* Copy the data back out of the bounce buffer if one was used. */
	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}
232
233
234
235
236
237
238
239
240
241
/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Dispatches to the spi-mem path when available, otherwise to the legacy
 * controller_ops read hook.
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_read_data(nor, from, len, buf);

	return nor->controller_ops->read(nor, from, len, buf);
}
249
250
251
252
253
254
255
256
257
258
259
/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	/* SST AAI continuation words carry no address phase. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	/* Stage the (possibly clamped) payload in the bounce buffer. */
	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}
291
292
293
294
295
296
297
298
299
300
/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Dispatches to the spi-mem path when available, otherwise to the legacy
 * controller_ops write hook.
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_write_data(nor, to, len, buf);

	return nor->controller_ops->write(nor, to, len, buf);
}
309
310
311
312
313
314
315
/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

	return ret;
}
340
341
342
343
344
345
346
/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_disable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

	return ret;
}
371
372
373
374
375
376
377
378
379
/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *              Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			/* Octal DTR RDSR needs flash-specific address/dummy. */
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * Read an even number of bytes in DTR mode; only the
			 * first byte is meaningful to callers.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}
414
415
416
417
418
419
420
421
422
423
/**
 * spi_nor_read_fsr() - Read the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'
 * @fsr:	pointer to a DMA-able buffer where the value of the
 *              Flag Status Register will be written. Should be at least 2
 *              bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, fsr, 0));

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			/* Octal DTR RDFSR reuses the RDSR address/dummy setup. */
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * Read an even number of bytes in DTR mode; only the
			 * first byte is meaningful to callers.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading FSR\n", ret);

	return ret;
}
458
459
460
461
462
463
464
465
466
467
/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *              Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, cr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}
492
493
494
495
496
497
498
499
500
/**
 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
						  SPINOR_OP_EN4B :
						  SPINOR_OP_EX4B,
						  0),
				  SPI_MEM_OP_NO_ADDR,
				  SPI_MEM_OP_NO_DUMMY,
				  SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ? SPINOR_OP_EN4B :
								SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}
530
531
532
533
534
535
536
537
538
539
/**
 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
 * flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Writes the EXTADD bit (bit 7) of the Bank Address Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}
566
567
568
569
570
571
572
573
/**
 * spi_nor_write_ear() - Write Extended Address Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @ear:	value to write to the Extended Address Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
	int ret;

	nor->bouncebuf[0] = ear;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d writing EAR\n", ret);

	return ret;
}
600
601
602
603
604
605
606
607
608
/**
 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *              Status Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);

	return ret;
}
633
634
635
636
637
638
639
640
641static int spi_nor_xsr_ready(struct spi_nor *nor)
642{
643 int ret;
644
645 ret = spi_nor_xread_sr(nor, nor->bouncebuf);
646 if (ret)
647 return ret;
648
649 return !!(nor->bouncebuf[0] & XSR_RDY);
650}
651
652
653
654
655
/**
 * spi_nor_clear_sr() - Clear the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Best-effort: a failure is only logged at debug level.
 */
static void spi_nor_clear_sr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}
678
679
680
681
682
683
684
685
/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * On flashes with the CLSR feature, also detect and clear erase/program
 * failures, reporting them as -EIO.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_sr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->flags & SNOR_F_USE_CLSR &&
	    nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
		if (nor->bouncebuf[0] & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		spi_nor_clear_sr(nor);

		/*
		 * WEL bit remains set after an error; explicitly clear it with
		 * Write Disable so the flash is left in a clean state.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !(nor->bouncebuf[0] & SR_WIP);
}
717
718
719
720
721
/**
 * spi_nor_clear_fsr() - Clear the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Best-effort: a failure is only logged at debug level.
 */
static void spi_nor_clear_fsr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}
744
745
746
747
748
749
750
751
/**
 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
 * ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Erase/program failures reported in the FSR are logged, cleared, and
 * returned as -EIO.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_fsr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
		if (nor->bouncebuf[0] & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (nor->bouncebuf[0] & FSR_PT_ERR)
			dev_err(nor->dev,
				"Attempted to modify a protected sector.\n");

		spi_nor_clear_fsr(nor);

		/*
		 * WEL bit remains set after an error; explicitly clear it with
		 * Write Disable so the flash is left in a clean state.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !!(nor->bouncebuf[0] & FSR_READY);
}
786
787
788
789
790
791
792
793static int spi_nor_ready(struct spi_nor *nor)
794{
795 int sr, fsr;
796
797 if (nor->flags & SNOR_F_READY_XSR_RDY)
798 sr = spi_nor_xsr_ready(nor);
799 else
800 sr = spi_nor_sr_ready(nor);
801 if (sr < 0)
802 return sr;
803 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
804 if (fsr < 0)
805 return fsr;
806 return sr && fsr;
807}
808
809
810
811
812
813
814
815
816
/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Polls spi_nor_ready() until the flash reports ready, the deadline passes,
 * or a read error occurs. One final readiness check is performed after the
 * deadline expires.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}
842
843
844
845
846
847
848
849
/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}
855
856
857
858
859
860
861
862
863
/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to DMA-able buffer to write to the Status Register.
 * @len:	number of bytes to write to the Status Register.
 *
 * Issues Write Enable first, then WRSR, then waits for the flash to finish
 * the register write.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(len, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
						       len);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}
894
895
896
897
898
899
900
901
902
903static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
904{
905 int ret;
906
907 nor->bouncebuf[0] = sr1;
908
909 ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
910 if (ret)
911 return ret;
912
913 ret = spi_nor_read_sr(nor, nor->bouncebuf);
914 if (ret)
915 return ret;
916
917 if (nor->bouncebuf[0] != sr1) {
918 dev_dbg(nor->dev, "SR1: read back test failed\n");
919 return -EIO;
920 }
921
922 return 0;
923}
924
925
926
927
928
929
930
931
932
933
934
/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 match the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Keep the current value of the Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (nor->params->quad_enable) {
		/*
		 * CR cannot be read back; if the Quad Enable method is set we
		 * assume the flash needs QE kept set (SR2 bit 1) so quad mode
		 * is not accidentally disabled by this 16-bit write.
		 * NOTE(review): this relies on the quad_enable hook implying
		 * the SR2_QUAD_EN_BIT1 layout — carried from upstream intent.
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		/* No known SR2 state to preserve: write zero. */
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	/* Verify SR2/CR retained the value we wrote. */
	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
989
990
991
992
993
994
995
996
997
998
999
/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register match the received value, and that the 16-bit Write
 * did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	/* Verify SR1 was not disturbed by the 16-bit write. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	/* Verify CR retained the value we wrote. */
	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
1053{
1054 if (nor->flags & SNOR_F_HAS_16BIT_SR)
1055 return spi_nor_write_16bit_sr_and_check(nor, sr1);
1056
1057 return spi_nor_write_sr1_and_check(nor, sr1);
1058}
1059
1060
1061
1062
1063
1064
1065
1066
1067
/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, sr2, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
						       sr2, 1);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
/**
 * spi_nor_read_sr2() - Read the Status Register 2 using the
 * SPINOR_OP_RDSR2 (3fh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer where the value of the
 *		Status Register 2 will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr2, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR2\n", ret);

	return ret;
}
1132
1133
1134
1135
1136
1137
1138
/**
 * spi_nor_erase_chip() - Erase the entire flash memory.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Issues the Chip Erase opcode; the caller is responsible for Write Enable
 * and for waiting until the erase completes.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_chip(struct spi_nor *nor)
{
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}
1166
1167static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
1168{
1169 size_t i;
1170
1171 for (i = 0; i < size; i++)
1172 if (table[i][0] == opcode)
1173 return table[i][1];
1174
1175
1176 return opcode;
1177}
1178
/* Map a 3-byte-address read opcode to its 4-byte-address equivalent. */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		/* DTR read variants. */
		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
1199
/* Map a 3-byte-address program opcode to its 4-byte-address equivalent. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
1213
/* Map a 3-byte-address erase opcode to its 4-byte-address equivalent. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
1225
1226static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1227{
1228 return !!nor->params->erase_map.uniform_erase_type;
1229}
1230
/*
 * Switch the read/program/erase opcodes (and, for non-uniform erase maps,
 * every erase type's opcode) to their 4-byte-address variants.
 */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}
1249
1250int spi_nor_lock_and_prep(struct spi_nor *nor)
1251{
1252 int ret = 0;
1253
1254 mutex_lock(&nor->lock);
1255
1256 if (nor->controller_ops && nor->controller_ops->prepare) {
1257 ret = nor->controller_ops->prepare(nor);
1258 if (ret) {
1259 mutex_unlock(&nor->lock);
1260 return ret;
1261 }
1262 }
1263 return ret;
1264}
1265
/* Run the controller's optional unprepare hook and release the nor lock. */
void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
	mutex_unlock(&nor->lock);
}
1272
1273static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1274{
1275 if (!nor->params->convert_addr)
1276 return addr;
1277
1278 return nor->params->convert_addr(nor, addr);
1279}
1280
1281
1282
1283
/*
 * Initiate the erasure of a single sector. The caller is responsible for
 * Write Enable beforehand and for waiting for completion afterwards.
 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	addr = spi_nor_convert_addr(nor, addr);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation when the dedicated erase hook is absent:
	 * send the erase opcode with the address serialized big-endian in
	 * the bounce buffer.
	 */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_width);
}
1316
1317
1318
1319
1320
1321
1322
1323
1324
/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Erase sizes are powers of two, so mask/shift replace a 64-bit division.
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase. The region in
 *				    which the address fits is expected to be
 *				    provided.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0; walk from the largest down so the biggest fit wins.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		/* Alignment is not mandatory for overlaid regions. */
		if (region->offset & SNOR_OVERLAID_REGION)
			return erase;

		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (rem)
			continue;
		else
			return erase;
	}

	return NULL;
}
1384
/* Non-zero when @region is flagged as the last one in the erase map. */
static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
	return region->offset & SNOR_LAST_REGION;
}
1389
/* End offset (exclusive) of @region; flag bits are masked off the offset. */
static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
	return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}
1394
1395
1396
1397
1398
1399
1400
1401struct spi_nor_erase_region *
1402spi_nor_region_next(struct spi_nor_erase_region *region)
1403{
1404 if (spi_nor_region_is_last(region))
1405 return NULL;
1406 region++;
1407 return region;
1408}
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
/**
 * spi_nor_find_erase_region() - find the region of the serial flash memory in
 *				 which the offset fits
 * @map:	the erase map of the SPI NOR
 * @addr:	offset in the serial flash memory
 *
 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	/* Walk the region list until @addr falls inside one of them. */
	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}
1437
1438
1439
1440
1441
1442
1443
1444
1445
/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Allocates and initializes a single-count erase command. The caller owns the
 * returned command and must free it (see spi_nor_destroy_erase_cmd_list()).
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	/* Overlaid regions are erased in one shot covering the whole region. */
	if (region->offset & SNOR_OVERLAID_REGION)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}
1467
1468
1469
1470
1471
/**
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list:	list of erase commands
 *
 * Unlinks and frees every command on the list.
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed. Consecutive identical erase types within the same region are
 * coalesced by bumping the previous command's count.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/* Start a new command unless it extends the previous one. */
		if (prev_erase != erase ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Advance to the next region when this one is consumed. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and executes it once we
 * validate that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			/* Each erase needs its own Write Enable. */
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
1599
1600
1601
1602
1603
/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Uniform erase requires the length to be sector-aligned. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto erase_err;

		ret = spi_nor_erase_chip(nor);
		if (ret)
			goto erase_err;

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * use num_sectors but we may not have information about all
		 * the sectors.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/*
	 * "sector"-at-a-time erase for uniform erase maps; sector size is
	 * whatever mtd->erasesize was set up to be.
	 */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor);

	return ret;
}
1690
1691static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
1692{
1693 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1694
1695 if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
1696 return mask | SR_BP3_BIT6;
1697
1698 if (nor->flags & SNOR_F_HAS_4BIT_BP)
1699 return mask | SR_BP3;
1700
1701 return mask;
1702}
1703
1704static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
1705{
1706 if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
1707 return SR_TB_BIT6;
1708 else
1709 return SR_TB_BIT5;
1710}
1711
1712static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
1713{
1714 unsigned int bp_slots, bp_slots_needed;
1715 u8 mask = spi_nor_get_sr_bp_mask(nor);
1716
1717
1718 bp_slots = (1 << hweight8(mask)) - 2;
1719 bp_slots_needed = ilog2(nor->info->n_sectors);
1720
1721 if (bp_slots_needed > bp_slots)
1722 return nor->info->sector_size <<
1723 (bp_slots_needed - bp_slots);
1724 else
1725 return nor->info->sector_size;
1726}
1727
/*
 * Decode the Block Protection bits of Status Register value @sr into the
 * currently locked flash range *@ofs .. *@ofs + *@len.
 */
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
					uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 bp, val = sr & mask;

	/* Fold a BP3 stored in bit 6 back into the contiguous BP field. */
	if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
		val = (val & ~SR_BP3_BIT6) | SR_BP3;

	bp = val >> SR_BP_SHIFT;

	if (!bp) {
		/* No protection at all. */
		*ofs = 0;
		*len = 0;
		return;
	}

	/* Each BP increment doubles the protected length. */
	min_prot_len = spi_nor_get_min_prot_length_sr(nor);
	*len = min_prot_len << (bp - 1);

	if (*len > mtd->size)
		*len = mtd->size;

	/* TB set: protection grows from the bottom; else from the top. */
	if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
		*ofs = 0;
	else
		*ofs = mtd->size - *len;
}
1760
1761
1762
1763
1764
1765static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
1766 uint64_t len, u8 sr, bool locked)
1767{
1768 loff_t lock_offs;
1769 uint64_t lock_len;
1770
1771 if (!len)
1772 return 1;
1773
1774 spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
1775
1776 if (locked)
1777
1778 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1779 else
1780
1781 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1782}
1783
1784static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1785 u8 sr)
1786{
1787 return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
1788}
1789
1790static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1791 u8 sr)
1792{
1793 return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
1794}
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
/*
 * Lock a region of the flash via the Status Register BP (and Top/Bottom)
 * bits, protecting the smallest expressible superset of @ofs..@ofs+@len.
 * Returns 0 on success (including already locked), -EINVAL when the range
 * cannot be expressed, -errno on register access failure.
 */
static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	int ret, status_old, status_new;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	status_old = nor->bouncebuf[0];

	/* If nothing in our range is unlocked, we don't need to do anything. */
	if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection. */
	if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection. */
	if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
				  status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid. */
	use_top = can_be_top;

	/* lock_len: length of the region that should end up locked. */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	if (lock_len == mtd->size) {
		val = mask;
	} else {
		/* BP value: each increment doubles the protected length. */
		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
		val = pow << SR_BP_SHIFT;

		/* Move BP3 to bit 6 when the part stores it there. */
		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
			val = (val & ~SR_BP3) | SR_BP3_BIT6;

		if (val & ~mask)
			return -EINVAL;

		/* Don't "lock" with no region! */
		if (!(val & mask))
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Disallow further writes if WP pin is asserted. */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same. */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas. */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}
1908
1909
1910
1911
1912
1913
/*
 * Unlock a region of the flash via the Status Register BP (and Top/Bottom)
 * bits: shrink the locked range so that @ofs..@ofs+@len becomes writable
 * without unlocking anything callers did not ask for.
 */
static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	int ret, status_old, status_new;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	status_old = nor->bouncebuf[0];

	/* If nothing in our range is locked, we don't need to do anything. */
	if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection. */
	if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection. */
	if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				    status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid. */
	use_top = can_be_top;

	/* lock_len: length of the region that should remain locked. */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	if (lock_len == 0) {
		val = 0;
	} else {
		/* BP value: each increment doubles the protected length. */
		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
		val = pow << SR_BP_SHIFT;

		/* Move BP3 to bit 6 when the part stores it there. */
		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
			val = (val & ~SR_BP3) | SR_BP3_BIT6;

		/* Some power-of-two sizes are not supported. */
		if (val & ~mask)
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Don't protect status register if we're fully unlocked. */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same. */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas. */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}
1991
1992
1993
1994
1995
1996
1997
1998
1999static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
2000{
2001 int ret;
2002
2003 ret = spi_nor_read_sr(nor, nor->bouncebuf);
2004 if (ret)
2005 return ret;
2006
2007 return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
2008}
2009
/* Default locking backend: Status Register BP/TB protection bits. */
static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
	.lock = spi_nor_sr_lock,
	.unlock = spi_nor_sr_unlock,
	.is_locked = spi_nor_sr_is_locked,
};
2015
2016static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2017{
2018 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2019 int ret;
2020
2021 ret = spi_nor_lock_and_prep(nor);
2022 if (ret)
2023 return ret;
2024
2025 ret = nor->params->locking_ops->lock(nor, ofs, len);
2026
2027 spi_nor_unlock_and_unprep(nor);
2028 return ret;
2029}
2030
2031static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2032{
2033 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2034 int ret;
2035
2036 ret = spi_nor_lock_and_prep(nor);
2037 if (ret)
2038 return ret;
2039
2040 ret = nor->params->locking_ops->unlock(nor, ofs, len);
2041
2042 spi_nor_unlock_and_unprep(nor);
2043 return ret;
2044}
2045
2046static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2047{
2048 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2049 int ret;
2050
2051 ret = spi_nor_lock_and_prep(nor);
2052 if (ret)
2053 return ret;
2054
2055 ret = nor->params->locking_ops->is_locked(nor, ofs, len);
2056
2057 spi_nor_unlock_and_unprep(nor);
2058 return ret;
2059}
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
2071{
2072 int ret;
2073
2074 ret = spi_nor_read_sr(nor, nor->bouncebuf);
2075 if (ret)
2076 return ret;
2077
2078 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
2079 return 0;
2080
2081 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
2082
2083 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
2084}
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
2096{
2097 int ret;
2098
2099 if (nor->flags & SNOR_F_NO_READ_CR)
2100 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
2101
2102 ret = spi_nor_read_cr(nor, nor->bouncebuf);
2103 if (ret)
2104 return ret;
2105
2106 if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
2107 return 0;
2108
2109 nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
2110
2111 return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
2112}
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
2127{
2128 u8 *sr2 = nor->bouncebuf;
2129 int ret;
2130 u8 sr2_written;
2131
2132
2133 ret = spi_nor_read_sr2(nor, sr2);
2134 if (ret)
2135 return ret;
2136 if (*sr2 & SR2_QUAD_EN_BIT7)
2137 return 0;
2138
2139
2140 *sr2 |= SR2_QUAD_EN_BIT7;
2141
2142 ret = spi_nor_write_sr2(nor, sr2);
2143 if (ret)
2144 return ret;
2145
2146 sr2_written = *sr2;
2147
2148
2149 ret = spi_nor_read_sr2(nor, sr2);
2150 if (ret)
2151 return ret;
2152
2153 if (*sr2 != sr2_written) {
2154 dev_dbg(nor->dev, "SR2: Read back test failed\n");
2155 return -EIO;
2156 }
2157
2158 return 0;
2159}
2160
/* Manufacturer drivers searched, in order, during JEDEC ID lookup. */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_catalyst,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_fujitsu,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
};
2180
2181static const struct flash_info *
2182spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
2183 const u8 *id)
2184{
2185 unsigned int i;
2186
2187 for (i = 0; i < nparts; i++) {
2188 if (parts[i].id_len &&
2189 !memcmp(parts[i].id, id, parts[i].id_len))
2190 return &parts[i];
2191 }
2192
2193 return NULL;
2194}
2195
/*
 * Issue the Read JEDEC ID command and look the returned bytes up in every
 * registered manufacturer table. On a match, records nor->manufacturer and
 * returns the flash_info; returns ERR_PTR(-errno) on transfer failure or
 * ERR_PTR(-ENODEV) for an unrecognized ID.
 */
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	unsigned int i;
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controller path. */
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* First match across all manufacturer tables wins. */
	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		info = spi_nor_search_part_by_id(manufacturers[i]->parts,
						 manufacturers[i]->nparts,
						 id);
		if (info) {
			nor->manufacturer = manufacturers[i];
			return info;
		}
	}

	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
		SPI_NOR_MAX_ID_LEN, id);
	return ERR_PTR(-ENODEV);
}
2234
/*
 * mtd read callback. Loops because spi_nor_read_data() may transfer less
 * than requested; a zero-byte transfer is treated as -EIO to avoid
 * spinning forever.
 */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		/* Map the logical address (e.g. for stacked/banked dies). */
		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
2273
2274
2275
2276
2277
2278
/*
 * mtd write callback. Splits the write at page boundaries, since a page
 * program operation must not cross a page, and waits for each page to
 * complete before starting the next.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 */
		if (hweight32(nor->page_size) == 1) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			page_offset = do_div(aux, nor->page_size);
		}

		/* The size of data remaining on the first page. */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
2337
2338static int spi_nor_check(struct spi_nor *nor)
2339{
2340 if (!nor->dev ||
2341 (!nor->spimem && !nor->controller_ops) ||
2342 (!nor->spimem && nor->controller_ops &&
2343 (!nor->controller_ops->read ||
2344 !nor->controller_ops->write ||
2345 !nor->controller_ops->read_reg ||
2346 !nor->controller_ops->write_reg))) {
2347 pr_err("spi-nor: please fill all the necessary fields!\n");
2348 return -EINVAL;
2349 }
2350
2351 if (nor->spimem && nor->controller_ops) {
2352 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2353 return -EINVAL;
2354 }
2355
2356 return 0;
2357}
2358
2359void
2360spi_nor_set_read_settings(struct spi_nor_read_command *read,
2361 u8 num_mode_clocks,
2362 u8 num_wait_states,
2363 u8 opcode,
2364 enum spi_nor_protocol proto)
2365{
2366 read->num_mode_clocks = num_mode_clocks;
2367 read->num_wait_states = num_wait_states;
2368 read->opcode = opcode;
2369 read->proto = proto;
2370}
2371
2372void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
2373 enum spi_nor_protocol proto)
2374{
2375 pp->opcode = opcode;
2376 pp->proto = proto;
2377}
2378
2379static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2380{
2381 size_t i;
2382
2383 for (i = 0; i < size; i++)
2384 if (table[i][0] == (int)hwcaps)
2385 return table[i][1];
2386
2387 return -EINVAL;
2388}
2389
/* Map a SNOR_HWCAPS_READ_* bit to its SNOR_CMD_READ_* command index. */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ, SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2414
/* Map a SNOR_HWCAPS_PP_* bit to its SNOR_CMD_PP_* command index. */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP, SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2431
2432
2433
2434
2435
2436
2437
2438
2439
/*
 * Check whether the controller supports memory operation @op.
 *
 * Probes with a 4-byte address first; if that is rejected and the part
 * still fits in 3-byte addressing (<= 16 MiB), retries with 3 address
 * bytes. Note: op->addr.nbytes is overwritten as a side effect.
 *
 * Returns 0 if supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		/* Parts larger than 16 MiB require 4-byte addressing. */
		if (nor->mtd.size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient. */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}
2462
2463
2464
2465
2466
2467
2468
2469
2470
/*
 * Check whether the controller can execute the read operation described
 * by @read. Returns 0 if supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_DUMMY(1, 0),
					  SPI_MEM_OP_DATA_IN(1, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}
2488
2489
2490
2491
2492
2493
2494
2495
2496
/*
 * Check whether the controller can execute the page program operation
 * described by @pp. Returns 0 if supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(1, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}
2509
2510
2511
2512
2513
2514
2515
2516
/*
 * Clear from *@hwcaps every capability bit that either the framework or
 * the SPI controller cannot actually execute.
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	unsigned int cap;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * If the reset line is broken, avoid stateful X-X-X(-DTR) modes the
	 * flash could be stuck in across an unexpected reset.
	 */
	if (nor->flags & SNOR_F_BROKEN_RESET)
		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		/* Drop read capabilities the controller rejects. */
		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		/* Drop page program capabilities the controller rejects. */
		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}
2553
2554
2555
2556
2557
2558
2559
2560void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2561 u8 opcode)
2562{
2563 erase->size = size;
2564 erase->opcode = opcode;
2565
2566 erase->size_shift = ffs(erase->size) - 1;
2567 erase->size_mask = (1 << erase->size_shift) - 1;
2568}
2569
2570
2571
2572
2573
2574
2575
2576
/*
 * Initialize @map as a single uniform region spanning the whole flash,
 * supporting the erase types selected by @erase_mask.
 */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
				    u8 erase_mask, u64 flash_size)
{
	/* Offset 0 with the erase-type bits and the last-region marker. */
	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
				     SNOR_LAST_REGION;
	map->uniform_region.size = flash_size;
	map->regions = &map->uniform_region;
	map->uniform_erase_type = erase_mask;
}
2587
2588int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2589 const struct sfdp_parameter_header *bfpt_header,
2590 const struct sfdp_bfpt *bfpt,
2591 struct spi_nor_flash_parameter *params)
2592{
2593 int ret;
2594
2595 if (nor->manufacturer && nor->manufacturer->fixups &&
2596 nor->manufacturer->fixups->post_bfpt) {
2597 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2598 bfpt, params);
2599 if (ret)
2600 return ret;
2601 }
2602
2603 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2604 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2605 params);
2606
2607 return 0;
2608}
2609
/*
 * Pick the best read command among the capabilities shared by the flash
 * and the controller (highest capability bit wins) and program the
 * corresponding opcode, protocol and dummy-cycle count into @nor.
 */
static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * Mode clock cycles (used by QSPI parts to enter/leave Continuous
	 * Read / XIP mode) and wait-state clock cycles are merged into a
	 * single dummy-cycle count: XIP is out of scope for the mtd
	 * sub-system, so the distinction does not matter here.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}
2640
2641static int spi_nor_select_pp(struct spi_nor *nor,
2642 u32 shared_hwcaps)
2643{
2644 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2645 const struct spi_nor_pp_command *pp;
2646
2647 if (best_match < 0)
2648 return -EINVAL;
2649
2650 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2651 if (cmd < 0)
2652 return -EINVAL;
2653
2654 pp = &nor->params->page_programs[cmd];
2655 nor->program_opcode = pp->opcode;
2656 nor->write_proto = pp->proto;
2657 return 0;
2658}
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
/*
 * Select the uniform erase type to use: prefer the one whose size equals
 * @wanted_size, otherwise fall back to the first defined type found when
 * scanning from the highest index down. Updates map->uniform_erase_type
 * to keep only the selected type's bit. Returns NULL if no type is set.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
			     const u32 wanted_size)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_erase_type;

	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/*
		 * If the current erase size is the wanted one, stop here:
		 * we have found the right uniform erase type.
		 */
		if (tested_erase->size == wanted_size) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise, remember the first defined erase type as a
		 * fallback in case no exact match is found.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;

	}

	if (!erase)
		return NULL;

	/* Keep only the selected erase type in the uniform mask. */
	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
	map->uniform_erase_type |= BIT(erase - map->erase_type);
	return erase;
}
2712
/*
 * Select the erase settings: for a uniform erase map, pick the uniform
 * erase type (preferring 4 KiB sectors when the kernel is configured for
 * them); for a non-uniform map, expose the largest defined erase size as
 * mtd->erasesize.
 */
static int spi_nor_select_erase(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	u32 wanted_size = nor->info->sector_size;
	int i;

	/*
	 * The previous implementation handling Sector Erase commands assumed
	 * the flash memory has uniform layout; keep preferring 4 KiB erases
	 * when so configured.
	 */
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	wanted_size = 4096u;
#endif

	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map, wanted_size);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * Non-uniform map: advertise the biggest defined erase size so any
	 * mtd->erasesize-aligned range can be erased.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}
2760
/*
 * Default setup hook: intersect the controller's and the flash's hardware
 * capabilities, then select the read, page program and erase settings.
 */
static int spi_nor_default_setup(struct spi_nor *nor,
				 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the
	 * controller and the memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/*
		 * When called from spi_nor_probe(), all caps are set and we
		 * need to discard some of them based on what the SPI
		 * controller actually supports (using spi_mem_supports_op()).
		 */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * Legacy controller_ops path: SPI n-n-n protocols are not
		 * supported there, so discard them.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return 0;
}
2821
2822static int spi_nor_setup(struct spi_nor *nor,
2823 const struct spi_nor_hwcaps *hwcaps)
2824{
2825 if (!nor->params->setup)
2826 return 0;
2827
2828 return nor->params->setup(nor, hwcaps);
2829}
2830
2831
2832
2833
2834
2835
2836static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2837{
2838 if (nor->manufacturer && nor->manufacturer->fixups &&
2839 nor->manufacturer->fixups->default_init)
2840 nor->manufacturer->fixups->default_init(nor);
2841
2842 if (nor->info->fixups && nor->info->fixups->default_init)
2843 nor->info->fixups->default_init(nor);
2844}
2845
2846
2847
2848
2849
2850
2851
2852
2853
/*
 * Override the default parameters with data read from the flash's SFDP
 * tables. On parse failure the previous parameters are restored and any
 * SFDP-derived addressing state is discarded.
 */
static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	/* Snapshot the current parameters so they can be restored. */
	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor, nor->params)) {
		/* Parsing failed: roll back and forget SFDP side effects. */
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
		nor->addr_width = 0;
		nor->flags &= ~SNOR_F_4B_OPCODES;
	}
}
2866
2867
2868
2869
2870
2871
/*
 * Initialize nor->params from the static flash_info entry: default hooks,
 * size/page geometry, (Fast) Read, Page Program and Sector Erase settings.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Default hooks; manufacturer/part fixups may override them later. */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
	params->setup = spi_nor_default_setup;
	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with
	 * the smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
2989{
2990 if (nor->manufacturer && nor->manufacturer->fixups &&
2991 nor->manufacturer->fixups->post_sfdp)
2992 nor->manufacturer->fixups->post_sfdp(nor);
2993
2994 if (nor->info->fixups && nor->info->fixups->post_sfdp)
2995 nor->info->fixups->post_sfdp(nor);
2996}
2997
2998
2999
3000
3001
3002
3003
3004
/*
 * Late parameter initialization, after SFDP parsing and all fixups have
 * run: fill in defaults that depend on the final flags.
 */
static void spi_nor_late_init_params(struct spi_nor *nor)
{
	/*
	 * NOR protection support: when the part advertises locking but no
	 * manufacturer driver provided locking_ops, fall back to the
	 * Status Register BP-bit scheme.
	 */
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		nor->params->locking_ops = &spi_nor_sr_locking_ops;
}
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
/*
 * Allocate and initialize nor->params in layers: static flash_info
 * defaults, then manufacturer/part default_init fixups, then SFDP tables
 * (unless the part opted out), then post-SFDP fixups and late defaults.
 */
static int spi_nor_init_params(struct spi_nor *nor)
{
	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
	if (!nor->params)
		return -ENOMEM;

	spi_nor_info_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	/* Parse SFDP only for parts that advertise a fast multi-IO mode. */
	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
				 SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
		spi_nor_sfdp_init_params(nor);

	spi_nor_post_sfdp_fixups(nor);

	spi_nor_late_init_params(nor);

	return 0;
}
3073
3074
3075
3076
3077
3078
3079
3080static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
3081{
3082 int ret;
3083
3084 if (!nor->params->octal_dtr_enable)
3085 return 0;
3086
3087 if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
3088 nor->write_proto == SNOR_PROTO_8_8_8_DTR))
3089 return 0;
3090
3091 if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
3092 return 0;
3093
3094 ret = nor->params->octal_dtr_enable(nor, enable);
3095 if (ret)
3096 return ret;
3097
3098 if (enable)
3099 nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
3100 else
3101 nor->reg_proto = SNOR_PROTO_1_1_1;
3102
3103 return 0;
3104}
3105
3106
3107
3108
3109
3110
3111
3112static int spi_nor_quad_enable(struct spi_nor *nor)
3113{
3114 if (!nor->params->quad_enable)
3115 return 0;
3116
3117 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
3118 spi_nor_get_protocol_width(nor->write_proto) == 4))
3119 return 0;
3120
3121 return nor->params->quad_enable(nor);
3122}
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136static void spi_nor_try_unlock_all(struct spi_nor *nor)
3137{
3138 int ret;
3139
3140 if (!(nor->flags & SNOR_F_HAS_LOCK))
3141 return;
3142
3143 dev_dbg(nor->dev, "Unprotecting entire flash array\n");
3144
3145 ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size);
3146 if (ret)
3147 dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
3148}
3149
/*
 * spi_nor_init() - runtime initialization of the flash.
 * @nor:	pointer to the SPI NOR flash descriptor.
 *
 * Switches the chip into the negotiated I/O modes (octal DTR, then quad),
 * optionally clears write protection, and enters 4-byte address mode when
 * required. Called from spi_nor_scan() and again on resume.
 *
 * Return: 0 on success, -errno if a mode switch fails.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_octal_dtr_enable(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Unlock the whole array either unconditionally
	 * (CONFIG_MTD_SPI_NOR_SWP_DISABLE) or only when the protection bits
	 * are volatile and thus restored on the next power cycle
	 * (CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE).
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	if (nor->addr_width == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * Stateful 4-byte address mode: if the board cannot reset the
		 * flash (SNOR_F_BROKEN_RESET), an unexpected reboot may leave
		 * the chip in 4-byte mode while early boot code assumes
		 * 3-byte addressing — warn once about that risk.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		nor->params->set_4byte_addr_mode(nor, true);
	}

	return 0;
}
3198
3199static void spi_nor_soft_reset(struct spi_nor *nor)
3200{
3201 struct spi_mem_op op;
3202 int ret;
3203
3204 op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
3205 SPI_MEM_OP_NO_DUMMY,
3206 SPI_MEM_OP_NO_ADDR,
3207 SPI_MEM_OP_NO_DATA);
3208
3209 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
3210
3211 ret = spi_mem_exec_op(nor->spimem, &op);
3212 if (ret) {
3213 dev_warn(nor->dev, "Software reset failed: %d\n", ret);
3214 return;
3215 }
3216
3217 op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
3218 SPI_MEM_OP_NO_DUMMY,
3219 SPI_MEM_OP_NO_ADDR,
3220 SPI_MEM_OP_NO_DATA);
3221
3222 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
3223
3224 ret = spi_mem_exec_op(nor->spimem, &op);
3225 if (ret) {
3226 dev_warn(nor->dev, "Software reset failed: %d\n", ret);
3227 return;
3228 }
3229
3230
3231
3232
3233
3234
3235 usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
3236}
3237
3238
3239static int spi_nor_suspend(struct mtd_info *mtd)
3240{
3241 struct spi_nor *nor = mtd_to_spi_nor(mtd);
3242 int ret;
3243
3244
3245 ret = spi_nor_octal_dtr_enable(nor, false);
3246 if (ret)
3247 dev_err(nor->dev, "suspend() failed\n");
3248
3249 return ret;
3250}
3251
3252
3253static void spi_nor_resume(struct mtd_info *mtd)
3254{
3255 struct spi_nor *nor = mtd_to_spi_nor(mtd);
3256 struct device *dev = nor->dev;
3257 int ret;
3258
3259
3260 ret = spi_nor_init(nor);
3261 if (ret)
3262 dev_err(dev, "resume() failed\n");
3263}
3264
/*
 * spi_nor_restore() - put the flash back into a state reset-less boot code
 * can cope with. Called on driver remove and shutdown.
 * @nor:	pointer to the SPI NOR flash descriptor.
 */
void spi_nor_restore(struct spi_nor *nor)
{
	/*
	 * Leave stateful 4-byte address mode on boards that cannot reset the
	 * flash, so early boot code that assumes 3-byte addressing works.
	 */
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET)
		nor->params->set_4byte_addr_mode(nor, false);

	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
3276
3277static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
3278 const char *name)
3279{
3280 unsigned int i, j;
3281
3282 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
3283 for (j = 0; j < manufacturers[i]->nparts; j++) {
3284 if (!strcmp(name, manufacturers[i]->parts[j].name)) {
3285 nor->manufacturer = manufacturers[i];
3286 return &manufacturers[i]->parts[j];
3287 }
3288 }
3289 }
3290
3291 return NULL;
3292}
3293
3294static int spi_nor_set_addr_width(struct spi_nor *nor)
3295{
3296 if (nor->addr_width) {
3297
3298 } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311 nor->addr_width = 4;
3312 } else if (nor->info->addr_width) {
3313 nor->addr_width = nor->info->addr_width;
3314 } else {
3315 nor->addr_width = 3;
3316 }
3317
3318 if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
3319
3320 nor->addr_width = 4;
3321 }
3322
3323 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
3324 dev_dbg(nor->dev, "address width is too large: %u\n",
3325 nor->addr_width);
3326 return -EINVAL;
3327 }
3328
3329
3330 if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
3331 !(nor->flags & SNOR_F_HAS_4BAIT))
3332 spi_nor_set_4byte_opcodes(nor);
3333
3334 return 0;
3335}
3336
/*
 * spi_nor_debugfs_init() - fill in the MTD debug identification fields.
 * @nor:	pointer to the SPI NOR flash descriptor.
 * @info:	flash_info entry describing the detected part.
 *
 * Exposes the part name and a "spi-nor:<hex id>" string built from the
 * JEDEC ID bytes via the mtd debug info.
 */
static void spi_nor_debugfs_init(struct spi_nor *nor,
				 const struct flash_info *info)
{
	struct mtd_info *mtd = &nor->mtd;

	mtd->dbg.partname = info->name;
	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
					 info->id_len, info->id);
}
3346
/*
 * spi_nor_get_flash_info() - resolve the flash_info entry for this chip.
 * @nor:	pointer to the SPI NOR flash descriptor.
 * @name:	optional part name from platform data / modalias; may be NULL.
 *
 * Tries the name-based lookup first, falling back to JEDEC ID detection.
 * When both are available and disagree, the ID-based entry wins and a
 * warning is emitted.
 *
 * Return: the resolved flash_info entry, or an ERR_PTR on failure.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(nor, name);
	/* Fall back to JEDEC ID detection when the name lookup failed. */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, cross-check the supplied name against what
	 * the chip itself reports.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}
3386
/*
 * spi_nor_scan() - detect a SPI NOR flash and set up its MTD interface.
 * @nor:	pointer to the SPI NOR flash descriptor; nor->dev (and
 *		nor->spimem for spi-mem users) must be set by the caller.
 * @name:	optional part name used for a name-based lookup; NULL means
 *		rely solely on JEDEC ID detection.
 * @hwcaps:	hardware capabilities supported by the controller.
 *
 * Performs the whole bring-up sequence: protocol defaults, bounce buffer
 * allocation, chip identification, parameter initialization, mtd_info
 * population, opcode/protocol selection and runtime initialization.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * Allocate the bounce buffer before anything else: it is already
	 * needed for register reads during chip identification. PAGE_SIZE
	 * is the initial size; the probe path may grow it later for parts
	 * with larger pages.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Set the appropriate "ready" flag before any wait-till-ready call:
	 * some parts report readiness through XSR instead of the regular
	 * status register.
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	mtd->_write = spi_nor_write;

	/* Init flash parameters based on flash_info struct and SFDP tables. */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = nor->params->writesize;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = nor->params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;

	/* Only hook the lock ops if the params provide an implementation. */
	if (nor->params->locking_ops) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* Translate flash_info quirk flags into runtime SNOR_F_* flags. */
	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (info->flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;
	if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (info->flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (info->flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = nor->params->page_size;
	mtd->writebufsize = nor->page_size;

	/* DT property used by boards whose reset line cannot reach the flash. */
	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Configure the SPI memory: select opcodes, protocols and dummy
	 * cycles matching both the flash capabilities and @hwcaps.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;

	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device. */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
		 (long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
3544
/*
 * spi_nor_create_read_dirmap() - create a direct-mapping descriptor that
 * covers the whole flash for reads, using the negotiated read opcode,
 * address width and dummy cycles.
 * @nor:	pointer to the SPI NOR flash descriptor.
 *
 * Return: 0 on success, -errno if the dirmap descriptor cannot be created.
 */
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * spi_nor_spimem_setup_op() only sets the data buswidth when
	 * data.nbytes is non-zero, and the template carries zero bytes, so
	 * set it explicitly here.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}
3575
/*
 * spi_nor_create_write_dirmap() - create a direct-mapping descriptor that
 * covers the whole flash for writes, using the negotiated program opcode
 * and address width.
 * @nor:	pointer to the SPI NOR flash descriptor.
 *
 * Return: 0 on success, -errno if the dirmap descriptor cannot be created.
 */
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	/* SST AAI word program sends the address only on the first command. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * spi_nor_spimem_setup_op() only sets the data buswidth when
	 * data.nbytes is non-zero, and the template carries zero bytes, so
	 * set it explicitly here.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}
3604
/*
 * spi_nor_probe() - spi-mem probe entry point: allocate the spi_nor
 * descriptor, scan the flash and register the MTD device.
 * @spimem:	the spi-mem device being probed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;
	/*
	 * Advertise all capabilities up front; spi_nor_scan() narrows the
	 * selection down to what flash and controller actually support.
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	/* Platform-data name wins; otherwise fall back to the spi-mem name. */
	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * Pick the part name to scan for: platform data "type" if provided,
	 * otherwise the modalias — except the generic "spi-nor" alias, which
	 * means "auto-detect" and is passed down as NULL.
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL;	/* auto-detect via JEDEC ID */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	/*
	 * spi_nor_scan() sized the bounce buffer to PAGE_SIZE; if the
	 * detected part has larger pages, reallocate so a full page still
	 * fits.
	 */
	if (nor->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->page_size;
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}
3677
/*
 * spi_nor_remove() - spi-mem remove handler: restore the flash state and
 * unregister the MTD device.
 * @spimem:	the spi-mem device being removed.
 *
 * Return: 0 on success, -errno from mtd_device_unregister() otherwise.
 */
static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}
3687
/*
 * spi_nor_shutdown() - spi-mem shutdown handler: leave the flash in a state
 * that reset-less boot code can cope with (see spi_nor_restore()).
 * @spimem:	the spi-mem device being shut down.
 */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
/*
 * Legacy SPI device ID table. These aliases let boards keep binding by the
 * old part names; the actual chip is still identified by spi_nor_scan()
 * (by name or JEDEC ID). New board files should use the generic
 * "jedec,spi-nor" compatible instead of adding entries here.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Generic alias: bind without naming a specific part and let JEDEC
	 * ID detection pick the chip.
	 */
	{"spi-nor"},
	/*
	 * Named part aliases kept for existing users. Presumably historical;
	 * verify against board files before removing any entry.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* "-nonjedec" suffixed variants: parts probed without a JEDEC ID. */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAM aliases. */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ },
};
3751
/* Device-tree match table: the generic JEDEC-compatible SPI NOR binding. */
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatible for any SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F).
	 */
	{ .compatible = "jedec,spi-nor" },
	{ },
};
3761
3762
3763
3764
3765
3766
/*
 * spi-mem driver glue: registers the framework as a SPI memory driver with
 * both the legacy device-ID table and the DT match table above.
 */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};
module_spi_mem_driver(spi_nor_driver);
3780
3781MODULE_LICENSE("GPL v2");
3782MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
3783MODULE_AUTHOR("Mike Lavender");
3784MODULE_DESCRIPTION("framework for SPI NOR");
3785