1
2
3
4
5
6
7
8#include <linux/clk.h>
9#include <linux/delay.h>
10#include <linux/slab.h>
11#include <linux/sched/task_stack.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
14#include <linux/mtd/partitions.h>
15#include <linux/of.h>
16#include <linux/of_device.h>
17#include <linux/pm_runtime.h>
18#include <linux/dma/mxs-dma.h>
19#include "gpmi-nand.h"
20#include "gpmi-regs.h"
21#include "bch-regs.h"
22
23
24#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
25#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
26#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
27
28
29#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
30
31#define MXS_SET_ADDR 0x4
32#define MXS_CLR_ADDR 0x8
33
34
35
36
37
38static int clear_poll_bit(void __iomem *addr, u32 mask)
39{
40 int timeout = 0x400;
41
42
43 writel(mask, addr + MXS_CLR_ADDR);
44
45
46
47
48
49 udelay(1);
50
51
52 while ((readl(addr) & mask) && --timeout)
53 ;
54
55 return !timeout;
56}
57
58#define MODULE_CLKGATE (1 << 30)
59#define MODULE_SFTRST (1 << 31)
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/*
 * Reset (or just enable) a GPMI/BCH hardware block via its SFTRST/CLKGATE
 * control bits.
 *
 * @reset_addr:  base of the block's control register (with SET/CLR shadows)
 * @just_enable: when true, skip the actual soft reset and only ungate the
 *               clock (used for blocks that must not be soft-reset)
 *
 * Returns 0 on success, -ETIMEDOUT when any of the polls time out.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* Clear and poll SFTRST to make sure the block is out of reset. */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* Ungate the clock so the block can respond. */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* Assert SFTRST to reset the block. */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* The reset asserting completes when CLKGATE reads as set. */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			;
		if (unlikely(!timeout))
			goto error;
	}

	/* Bring the block out of reset again... */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* ...and ungate its clock. */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
118
119static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
120{
121 struct clk *clk;
122 int ret;
123 int i;
124
125 for (i = 0; i < GPMI_CLK_MAX; i++) {
126 clk = this->resources.clock[i];
127 if (!clk)
128 break;
129
130 if (v) {
131 ret = clk_prepare_enable(clk);
132 if (ret)
133 goto err_clk;
134 } else {
135 clk_disable_unprepare(clk);
136 }
137 }
138 return 0;
139
140err_clk:
141 for (; i > 0; i--)
142 clk_disable_unprepare(this->resources.clock[i - 1]);
143 return ret;
144}
145
/* Bring the GPMI and BCH blocks out of reset and set the basic NAND mode. */
static int gpmi_init(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	int ret;

	/* Reset the GPMI block. */
	ret = gpmi_reset_block(r->gpmi_regs, false);
	if (ret)
		goto err_out;

	/*
	 * Reset BCH here too. On MXS (i.MX23/28) just_enable is passed as
	 * true, i.e. the BCH block is only clock-ungated, not soft-reset.
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
	if (ret)
		goto err_out;

	/* Choose NAND mode. */
	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Set the IRQ polarity. */
	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
	       r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Disable Write-Protection. */
	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Select BCH ECC. */
	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * Decouple the chip select from the DMA channel: a single DMA
	 * channel serves all chip selects (see get_dma_chan()).
	 */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	return 0;
err_out:
	return ret;
}
186
187
/* Dump the GPMI/BCH register files and the computed BCH geometry (debug). */
static void gpmi_dump_info(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct bch_geometry *geo = &this->bch_geometry;
	u32 reg;
	int i;

	/* Walk the GPMI register file in 0x10 strides up to HW_GPMI_DEBUG. */
	dev_err(this->dev, "Show GPMI registers :\n");
	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
		reg = readl(r->gpmi_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}

	/* Same for the BCH register file, up to HW_BCH_VERSION. */
	dev_err(this->dev, "Show BCH registers :\n");
	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
		reg = readl(r->bch_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}
	dev_err(this->dev, "BCH Geometry :\n"
		"GF length              : %u\n"
		"ECC Strength           : %u\n"
		"Page Size in Bytes     : %u\n"
		"Metadata Size in Bytes : %u\n"
		"ECC Chunk Size in Bytes: %u\n"
		"ECC Chunk Count        : %u\n"
		"Payload Size in Bytes  : %u\n"
		"Auxiliary Size in Bytes: %u\n"
		"Auxiliary Status Offset: %u\n"
		"Block Mark Byte Offset : %u\n"
		"Block Mark Bit Offset  : %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->page_size,
		geo->metadata_size,
		geo->ecc_chunk_size,
		geo->ecc_chunk_count,
		geo->payload_size,
		geo->auxiliary_size,
		geo->auxiliary_status_offset,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}
231
232static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
233{
234 struct bch_geometry *geo = &this->bch_geometry;
235
236
237 if (GPMI_IS_MXS(this)) {
238
239 if (geo->gf_len == 14)
240 return false;
241 }
242 return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
243}
244
245
246
247
248
249
250
/*
 * Compute the BCH geometry (GF length, chunk layout, page/auxiliary sizes,
 * block-mark position) from an explicitly requested ECC strength/step.
 *
 * @ecc_strength: requested correction strength, in bits per chunk
 * @ecc_step:     ECC step (chunk) size in bytes, SZ_512 or SZ_1K
 *
 * Returns 0 on success, -EINVAL when the controller cannot honour the
 * request.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
				    unsigned int ecc_strength,
				    unsigned int ecc_step)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_mark_bit_offset;

	/* The Galois field length follows from the ECC step size. */
	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		dev_err(this->dev,
			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
			chip->base.eccreq.strength,
			chip->base.eccreq.step_size);
		return -EINVAL;
	}
	geo->ecc_chunk_size = ecc_step;
	/* Round up: the BCH layout registers take strength/2 (even values). */
	geo->ecc_strength = round_up(ecc_strength, 2);
	if (!gpmi_check_ecc(this))
		return -EINVAL;

	/* Keep the ECC chunk at least as large as the OOB area. */
	if (geo->ecc_chunk_size < mtd->oobsize) {
		dev_err(this->dev,
			"unsupported nand chip. ecc size: %d, oob size : %d\n",
			ecc_step, mtd->oobsize);
		return -EINVAL;
	}

	/* The default metadata size used by this driver (see legacy path). */
	geo->metadata_size = 10;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/*
	 * The whole BCH page: data + metadata + all parity bits, where each
	 * chunk contributes gf_len * ecc_strength parity bits.
	 */
	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer holds the (32-bit aligned) metadata followed
	 * by one status byte per ECC chunk, also 32-bit aligned.
	 */
	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
			      + ALIGN(geo->ecc_chunk_count, 4);

	if (!this->swap_block_mark)
		return 0;

	/*
	 * Locate where the physical block mark lands inside the BCH view of
	 * the page: writesize bits minus the parity of the preceding chunks
	 * and the metadata bits. Needed by block_mark_swapping().
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;
	return 0;
}
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380static inline int get_ecc_strength(struct gpmi_nand_data *this)
381{
382 struct bch_geometry *geo = &this->bch_geometry;
383 struct mtd_info *mtd = nand_to_mtd(&this->nand);
384 int ecc_strength;
385
386 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
387 / (geo->gf_len * geo->ecc_chunk_count);
388
389
390 return round_down(ecc_strength, 2);
391}
392
/*
 * Compute the BCH geometry the way this driver historically did: fixed
 * 10-byte metadata, chunk size grown from 512 bytes until it covers the
 * OOB, and the maximum even ECC strength that still fits the OOB.
 *
 * Returns 0 on success, -EINVAL when the resulting strength exceeds what
 * the controller supports.
 */
static int legacy_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for ECC.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize) {
		geo->ecc_chunk_size *= 2; /* keep chunk >= OOB */
		geo->gf_len = 14;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!gpmi_check_ecc(this)) {
		dev_err(this->dev,
			"ecc strength: %d cannot be supported by the controller (%d)\n"
			"try to use minimum ecc strength that NAND chip required\n",
			geo->ecc_strength,
			this->devdata->bch_max_ecc_strength);
		return -EINVAL;
	}

	/* BCH page = data + metadata + parity of all chunks (in bytes). */
	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	if (!this->swap_block_mark)
		return 0;

	/*
	 * Locate the physical block mark inside the BCH view of the page so
	 * that block_mark_swapping() can swap it with the first OOB byte:
	 * writesize bits minus the parity of the preceding chunks and the
	 * metadata bits.
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;
	return 0;
}
504
505static int common_nfc_set_geometry(struct gpmi_nand_data *this)
506{
507 struct nand_chip *chip = &this->nand;
508
509 if (chip->ecc.strength > 0 && chip->ecc.size > 0)
510 return set_geometry_by_ecc_info(this, chip->ecc.strength,
511 chip->ecc.size);
512
513 if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
514 || legacy_set_geometry(this)) {
515 if (!(chip->base.eccreq.strength > 0 &&
516 chip->base.eccreq.step_size > 0))
517 return -EINVAL;
518
519 return set_geometry_by_ecc_info(this,
520 chip->base.eccreq.strength,
521 chip->base.eccreq.step_size);
522 }
523
524 return 0;
525}
526
527
528static int bch_set_geometry(struct gpmi_nand_data *this)
529{
530 struct resources *r = &this->resources;
531 int ret;
532
533 ret = common_nfc_set_geometry(this);
534 if (ret)
535 return ret;
536
537 ret = pm_runtime_get_sync(this->dev);
538 if (ret < 0)
539 return ret;
540
541
542
543
544
545
546 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
547 if (ret)
548 goto err_out;
549
550
551 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
552
553 ret = 0;
554err_out:
555 pm_runtime_mark_last_busy(this->dev);
556 pm_runtime_put_autosuspend(this->dev);
557
558 return ret;
559}
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
636 const struct nand_sdr_timings *sdr)
637{
638 struct gpmi_nfc_hardware_timing *hw = &this->hw;
639 unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
640 unsigned int period_ps, reference_period_ps;
641 unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
642 unsigned int tRP_ps;
643 bool use_half_period;
644 int sample_delay_ps, sample_delay_factor;
645 u16 busy_timeout_cycles;
646 u8 wrn_dly_sel;
647
648 if (sdr->tRC_min >= 30000) {
649
650 hw->clk_rate = 22000000;
651 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
652 } else if (sdr->tRC_min >= 25000) {
653
654 hw->clk_rate = 80000000;
655 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
656 } else {
657
658 hw->clk_rate = 100000000;
659 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
660 }
661
662
663 period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
664
665 addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
666 data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
667 data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
668 busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
669
670 hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
671 BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
672 BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
673 hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
674
675
676
677
678
679
680
681
682 if (period_ps > dll_threshold_ps) {
683 use_half_period = true;
684 reference_period_ps = period_ps / 2;
685 } else {
686 use_half_period = false;
687 reference_period_ps = period_ps;
688 }
689
690 tRP_ps = data_setup_cycles * period_ps;
691 sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
692 if (sample_delay_ps > 0)
693 sample_delay_factor = sample_delay_ps / reference_period_ps;
694 else
695 sample_delay_factor = 0;
696
697 hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
698 if (sample_delay_factor)
699 hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
700 BM_GPMI_CTRL1_DLL_ENABLE |
701 (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
702}
703
/* Write the timings precomputed by gpmi_nfc_compute_timings() to hardware. */
static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int dll_wait_time_us;

	/* clock[0] is the GPMI IO clock (see gpmi_get_clks()). */
	clk_set_rate(r->clock[0], hw->clk_rate);

	writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
	writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);

	/*
	 * Clear the CTRL1 timing fields first: DLL_ENABLE must be off while
	 * RDN_DELAY and HALF_PERIOD are changed, then set the new values.
	 */
	writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Wait 64 GPMI clock cycles before using the block after DLL setup. */
	dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
	if (!dll_wait_time_us)
		dll_wait_time_us = 1;

	/* Let the DLL settle. */
	udelay(dll_wait_time_us);
}
731
/*
 * ->setup_data_interface() hook: validate the requested SDR timings and
 * precompute the corresponding hardware timings; they are written to the
 * registers later, when must_apply_timings is consumed.
 */
static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
				     const struct nand_data_interface *conf)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	const struct nand_sdr_timings *sdr;

	/* Retrieve the required SDR timings. */
	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* Only the i.MX6 variants can reach EDO timings (tRC < 25 ns). */
	if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
		return -ENOTSUPP;

	/* A negative chipnr means "just check", so stop here. */
	if (chipnr < 0)
		return 0;

	/* Do the actual derivation of the controller timings. */
	gpmi_nfc_compute_timings(this, sdr);

	this->hw.must_apply_timings = true;

	return 0;
}
758
759
/* Acknowledge (clear) the BCH completion interrupt. */
static void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
765
/* Return the DMA channel to use; all transfers share channel 0 ("rx-tx"). */
static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
	return this->dma_chans[0];
}
771
772
/* DMA completion callback: wake up whoever waits on this->dma_done. */
static void dma_irq_callback(void *param)
{
	struct gpmi_nand_data *this = param;
	struct completion *dma_c = &this->dma_done;

	complete(dma_c);
}
780
/* BCH interrupt handler: ack the IRQ and signal ECC completion. */
static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	gpmi_clear_bch(this);
	complete(&this->bch_done);
	return IRQ_HANDLED;
}
789
790static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
791{
792
793
794
795
796 if (this->bch)
797 return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
798 else
799 return raw_len;
800}
801
802
/*
 * Prepare @sgl for a DMA transfer of @raw_len bytes to/from @buf.
 *
 * Tries to map the caller's buffer directly; when that is impossible (not a
 * valid lowmem address, on the stack, or the mapping fails) the driver's
 * bounce buffer is used instead.
 *
 * Returns true when @buf itself was mapped (direct DMA), false when the
 * bounce buffer is in use (the caller must copy data out after a read).
 */
static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
			     int raw_len, struct scatterlist *sgl,
			     enum dma_data_direction dr)
{
	int ret;
	int len = gpmi_raw_len_to_len(this, raw_len);

	/* First try to map the caller's buffer directly. */
	if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
		sg_init_one(sgl, buf, len);
		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			goto map_fail;

		return true;
	}

map_fail:
	/* We have to use our own DMA (bounce) buffer. */
	sg_init_one(sgl, this->data_buffer_dma, len);

	/* For writes, stage the payload into the bounce buffer first. */
	if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
		memcpy(this->data_buffer_dma, buf, len);

	dma_map_sg(this->dev, sgl, 1, dr);

	return false;
}
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
/**
 * gpmi_copy_bits - copy bits from one memory region to another
 * @dst: destination buffer
 * @dst_bit_off: bit offset we're starting to write at
 * @src: source buffer
 * @src_bit_off: bit offset we're starting to read from
 * @nbits: number of bits to copy
 *
 * Copies bits between two memory regions; used to move ECC sections,
 * which are not guaranteed to be byte aligned. @src and @dst must not
 * overlap.
 */
static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
			   size_t src_bit_off, size_t nbits)
{
	size_t i;
	size_t nbytes;
	u32 src_buffer = 0;
	size_t bits_in_src_buffer = 0;

	if (!nbits)
		return;

	/*
	 * Move src and dst pointers to the closest byte pointer and keep the
	 * remaining bit offsets within a byte.
	 */
	src += src_bit_off / 8;
	src_bit_off %= 8;

	dst += dst_bit_off / 8;
	dst_bit_off %= 8;

	/*
	 * Initialize src_buffer with the bits available in the first byte of
	 * data so that src ends up byte aligned.
	 */
	if (src_bit_off) {
		src_buffer = src[0] >> src_bit_off;
		if (nbits >= (8 - src_bit_off)) {
			bits_in_src_buffer += 8 - src_bit_off;
		} else {
			src_buffer &= GENMASK(nbits - 1, 0);
			bits_in_src_buffer += nbits;
		}
		nbits -= bits_in_src_buffer;
		src++;
	}

	/* Number of whole bytes that can be copied from src to dst. */
	nbytes = nbits / 8;

	/* Try to align dst to a byte boundary. */
	if (dst_bit_off) {
		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
			src_buffer |= src[0] << bits_in_src_buffer;
			bits_in_src_buffer += 8;
			src++;
			nbytes--;
		}

		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
			/* Fill the low bits of dst[0], keep its old low part. */
			dst[0] &= GENMASK(dst_bit_off - 1, 0);
			dst[0] |= src_buffer << dst_bit_off;
			src_buffer >>= (8 - dst_bit_off);
			bits_in_src_buffer -= (8 - dst_bit_off);
			dst_bit_off = 0;
			dst++;
			if (bits_in_src_buffer > 7) {
				bits_in_src_buffer -= 8;
				dst[0] = src_buffer;
				dst++;
				src_buffer >>= 8;
			}
		}
	}

	if (!bits_in_src_buffer && !dst_bit_off) {
		/*
		 * Both src and dst are now byte aligned, so the bulk of the
		 * copy can use plain memcpy().
		 */
		if (nbytes)
			memcpy(dst, src, nbytes);
	} else {
		/*
		 * src is not byte aligned: funnel each src byte through
		 * src_buffer before extracting a byte to store in dst.
		 */
		for (i = 0; i < nbytes; i++) {
			src_buffer |= src[i] << bits_in_src_buffer;
			dst[i] = src_buffer;
			src_buffer >>= 8;
		}
	}

	/* Update dst and src pointers past the bulk-copied region. */
	dst += nbytes;
	src += nbytes;

	/*
	 * nbits is now the number of remaining (tail) bits; it cannot exceed
	 * 8 since the pointers were advanced accordingly.
	 */
	nbits %= 8;

	/*
	 * If there are no more bits to copy and src_buffer is empty, we are
	 * done.
	 */
	if (!nbits && !bits_in_src_buffer)
		return;

	/* Pull the remaining bits into src_buffer. */
	if (nbits)
		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
			      bits_in_src_buffer;
	bits_in_src_buffer += nbits;

	/*
	 * If dst is still not byte aligned, prepend src_buffer with the low
	 * bits already present in the first dst byte.
	 */
	if (dst_bit_off)
		src_buffer = (src_buffer << dst_bit_off) |
			     (*dst & GENMASK(dst_bit_off - 1, 0));
	bits_in_src_buffer += dst_bit_off;

	/*
	 * Keep the most significant bits of the last dst byte if we end up
	 * with an unaligned number of bits.
	 */
	nbytes = bits_in_src_buffer / 8;
	if (bits_in_src_buffer % 8) {
		src_buffer |= (dst[nbytes] &
			       GENMASK(7, bits_in_src_buffer % 8)) <<
			      (nbytes * 8);
		nbytes++;
	}

	/* Flush the remaining bytes to dst. */
	for (i = 0; i < nbytes; i++) {
		dst[i] = src_buffer;
		src_buffer >>= 8;
	}
}
983
984
/* Bad-block scan pattern: a good block has 0xff in the first OOB byte. */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options	= 0,
	.offs		= 0,
	.len		= 1,
	.pattern	= scan_ff_pattern
};
992
993
994
995
996
997static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
998 struct mtd_oob_region *oobregion)
999{
1000 struct nand_chip *chip = mtd_to_nand(mtd);
1001 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1002 struct bch_geometry *geo = &this->bch_geometry;
1003
1004 if (section)
1005 return -ERANGE;
1006
1007 oobregion->offset = 0;
1008 oobregion->length = geo->page_size - mtd->writesize;
1009
1010 return 0;
1011}
1012
1013static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
1014 struct mtd_oob_region *oobregion)
1015{
1016 struct nand_chip *chip = mtd_to_nand(mtd);
1017 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1018 struct bch_geometry *geo = &this->bch_geometry;
1019
1020 if (section)
1021 return -ERANGE;
1022
1023
1024 if (geo->page_size < mtd->writesize + mtd->oobsize) {
1025 oobregion->offset = geo->page_size - mtd->writesize;
1026 oobregion->length = mtd->oobsize - oobregion->offset;
1027 }
1028
1029 return 0;
1030}
1031
/* i.MX23/28 need only the single GPMI IO clock. */
static const char * const gpmi_clks_for_mx2x[] = {
	"gpmi_io",
};

/* OOB layout callbacks: the BCH ECC region plus whatever bytes remain. */
static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
	.ecc = gpmi_ooblayout_ecc,
	.free = gpmi_ooblayout_free,
};

/*
 * Per-SoC limits: max BCH correction strength and the maximum DLL chain
 * delay in picoseconds (used as the half-period threshold when computing
 * the read sample delay — see gpmi_nfc_compute_timings()).
 */
static const struct gpmi_devdata gpmi_devdata_imx23 = {
	.type = IS_MX23,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const struct gpmi_devdata gpmi_devdata_imx28 = {
	.type = IS_MX28,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

/* The i.MX6 family needs the full set of GPMI/BCH bus clocks. */
static const char * const gpmi_clks_for_mx6[] = {
	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};

static const struct gpmi_devdata gpmi_devdata_imx6q = {
	.type = IS_MX6Q,
	.bch_max_ecc_strength = 40,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const struct gpmi_devdata gpmi_devdata_imx6sx = {
	.type = IS_MX6SX,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const char * const gpmi_clks_for_mx7d[] = {
	"gpmi_io", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx7d = {
	.type = IS_MX7D,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.clks = gpmi_clks_for_mx7d,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};
1088
1089static int acquire_register_block(struct gpmi_nand_data *this,
1090 const char *res_name)
1091{
1092 struct platform_device *pdev = this->pdev;
1093 struct resources *res = &this->resources;
1094 struct resource *r;
1095 void __iomem *p;
1096
1097 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
1098 p = devm_ioremap_resource(&pdev->dev, r);
1099 if (IS_ERR(p))
1100 return PTR_ERR(p);
1101
1102 if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
1103 res->gpmi_regs = p;
1104 else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
1105 res->bch_regs = p;
1106 else
1107 dev_err(this->dev, "unknown resource name : %s\n", res_name);
1108
1109 return 0;
1110}
1111
/* Find the BCH interrupt and install @irq_h as its (devm-managed) handler. */
static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
	struct platform_device *pdev = this->pdev;
	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
	struct resource *r;
	int err;

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
	if (!r) {
		dev_err(this->dev, "Can't get resource for %s\n", res_name);
		return -ENODEV;
	}

	err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
	if (err)
		dev_err(this->dev, "error requesting BCH IRQ\n");

	return err;
}
1131
1132static void release_dma_channels(struct gpmi_nand_data *this)
1133{
1134 unsigned int i;
1135 for (i = 0; i < DMA_CHANS; i++)
1136 if (this->dma_chans[i]) {
1137 dma_release_channel(this->dma_chans[i]);
1138 this->dma_chans[i] = NULL;
1139 }
1140}
1141
1142static int acquire_dma_channels(struct gpmi_nand_data *this)
1143{
1144 struct platform_device *pdev = this->pdev;
1145 struct dma_chan *dma_chan;
1146
1147
1148 dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
1149 if (!dma_chan) {
1150 dev_err(this->dev, "Failed to request DMA channel.\n");
1151 goto acquire_err;
1152 }
1153
1154 this->dma_chans[0] = dma_chan;
1155 return 0;
1156
1157acquire_err:
1158 release_dma_channels(this);
1159 return -EINVAL;
1160}
1161
/* Look up all clocks listed in the devdata and pre-set the MX6 IO clock. */
static int gpmi_get_clks(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct clk *clk;
	int err, i;

	for (i = 0; i < this->devdata->clks_count; i++) {
		clk = devm_clk_get(this->dev, this->devdata->clks[i]);
		if (IS_ERR(clk)) {
			err = PTR_ERR(clk);
			goto err_clock;
		}

		r->clock[i] = clk;
	}

	if (GPMI_IS_MX6(this))
		/*
		 * Set the default rate for the GPMI IO clock (clock[0]).
		 * The real chip timings later adjust it via
		 * gpmi_nfc_apply_timings().
		 */
		clk_set_rate(r->clock[0], 22000000);

	return 0;

err_clock:
	dev_dbg(this->dev, "failed in finding the clocks.\n");
	return err;
}
1193
/*
 * Acquire everything the probe needs: both register blocks, the BCH IRQ,
 * the DMA channel and the clocks. Register mappings, IRQ and clocks are
 * devm-managed; only the DMA channel needs explicit release on failure.
 */
static int acquire_resources(struct gpmi_nand_data *this)
{
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_regs;

	ret = gpmi_get_clks(this);
	if (ret)
		goto exit_clock;
	return 0;

exit_clock:
	release_dma_channels(this);
exit_regs:
	return ret;
}
1224
/* Undo acquire_resources(); everything else is devm-managed. */
static void release_resources(struct gpmi_nand_data *this)
{
	release_dma_channels(this);
}
1229
/* Free the DMA bounce buffer, the coherent auxiliary buffer and raw buffer. */
static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct bch_geometry *geo = &this->bch_geometry;

	if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
		dma_free_coherent(dev, geo->auxiliary_size,
				  this->auxiliary_virt,
				  this->auxiliary_phys);
	kfree(this->data_buffer_dma);
	kfree(this->raw_buffer);

	/* NULL the pointers so a second free (from the error path) is safe. */
	this->data_buffer_dma = NULL;
	this->raw_buffer = NULL;
}
1245
1246
/* Allocate the DMA bounce, coherent auxiliary and raw buffers. */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct device *dev = this->dev;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);

	/*
	 * This function can be called twice: before NAND identification
	 * mtd->writesize is still 0, so a PAGE_SIZE buffer is used; after
	 * identification the real page size is known and used instead.
	 */
	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
					GFP_DMA | GFP_KERNEL);
	if (this->data_buffer_dma == NULL)
		goto error_alloc;

	/* Coherent buffer for the metadata + per-chunk ECC status bytes. */
	this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
						  &this->auxiliary_phys, GFP_DMA);
	if (!this->auxiliary_virt)
		goto error_alloc;

	/* Scratch buffer for raw page + OOB accesses. */
	this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
	if (!this->raw_buffer)
		goto error_alloc;

	return 0;

error_alloc:
	/* gpmi_free_dma_buffer() copes with partially allocated state. */
	gpmi_free_dma_buffer(this);
	return -ENOMEM;
}
1281
1282
1283
1284
1285
1286
/*
 * Handle block mark swapping.
 *
 * Swaps the byte of the payload that overlays the physical block mark with
 * the first byte of the auxiliary (OOB) buffer. The operation is its own
 * inverse, so the same function serves both directions (read and write).
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
				void *payload, void *auxiliary)
{
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	unsigned char *p;
	unsigned char *a;
	unsigned int bit;
	unsigned char mask;
	unsigned char from_data;
	unsigned char from_oob;

	if (!this->swap_block_mark)
		return;

	/*
	 * If control arrives here, we're swapping. Make some convenience
	 * variables.
	 */
	bit = nfc_geo->block_mark_bit_offset;
	p = payload + nfc_geo->block_mark_byte_offset;
	a = auxiliary;

	/*
	 * Get the byte from the data area that overlays the block mark. The
	 * mark is not byte aligned inside the BCH view of the page, so it
	 * straddles p[0] and p[1].
	 */
	from_data = (p[0] >> bit) | (p[1] << (8 - bit));

	/* Get the first byte of the OOB. */
	from_oob = a[0];

	/* Swap: data byte goes to the OOB... */
	a[0] = from_data;

	/* ...and the OOB byte is spread across p[0]/p[1] at bit offset. */
	mask = (0x1 << bit) - 1;
	p[0] = (p[0] & mask) | (from_oob << bit);

	mask = ~0 << bit;
	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
1329
/*
 * Walk the per-chunk BCH status bytes for chunks [first, last) and return
 * the maximum bitflip count seen, updating mtd->ecc_stats along the way.
 * Chunks reported STATUS_UNCORRECTABLE are re-checked against the erased
 * (all-0xff) pattern before being counted as failures.
 */
static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
			       int last, int meta)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i;
	unsigned char *status;
	unsigned int max_bitflips = 0;

	/* The status bytes follow the (32-bit aligned) metadata. */
	status = this->auxiliary_virt + ALIGN(meta, 4);

	for (i = first; i < last; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
			u8 *eccbuf = this->raw_buffer;
			int offset, bitoffset;
			int eccbytes;
			int flips;

			/*
			 * Read this chunk's raw ECC bytes into raw_buffer.
			 * The parity is not byte aligned, so compute its bit
			 * position and round outward to whole bytes.
			 */
			offset = nfc_geo->metadata_size * 8;
			offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
			offset -= eccbits;
			bitoffset = offset % 8;
			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
			offset /= 8;
			eccbytes -= offset;
			nand_change_read_column_op(chip, offset, eccbuf,
						   eccbytes, false);

			/*
			 * The first and last bytes of eccbuf may contain
			 * in-band data bits. Force the non-ECC bits to one so
			 * nand_check_erased_ecc_chunk() does not count them
			 * as bitflips.
			 */
			if (bitoffset)
				eccbuf[0] |= GENMASK(bitoffset - 1, 0);

			bitoffset = (bitoffset + eccbits) % 8;
			if (bitoffset)
				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);

			/*
			 * Re-check the chunk against the erased pattern; a
			 * nearly-erased chunk is not a real ECC failure.
			 */
			if (i == 0) {
				/* The first chunk also covers the metadata. */
				flips = nand_check_erased_ecc_chunk(
						buf + i * nfc_geo->ecc_chunk_size,
						nfc_geo->ecc_chunk_size,
						eccbuf, eccbytes,
						this->auxiliary_virt,
						nfc_geo->metadata_size,
						nfc_geo->ecc_strength);
			} else {
				flips = nand_check_erased_ecc_chunk(
						buf + i * nfc_geo->ecc_chunk_size,
						nfc_geo->ecc_chunk_size,
						eccbuf, eccbytes,
						NULL, 0,
						nfc_geo->ecc_strength);
			}

			if (flips > 0) {
				max_bitflips = max_t(unsigned int, max_bitflips,
						     flips);
				mtd->ecc_stats.corrected += flips;
				continue;
			}

			/* Genuinely uncorrectable. */
			mtd->ecc_stats.failed++;
			continue;
		}

		/* Correctable chunk: *status is the bitflip count. */
		mtd->ecc_stats.corrected += *status;
		max_bitflips = max_t(unsigned int, max_bitflips, *status);
	}

	return max_bitflips;
}
1425
/* Cache the standard (full-page) BCH layout register values. */
static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	unsigned int ecc_strength = geo->ecc_strength >> 1; /* regs take strength/2 */
	unsigned int gf_len = geo->gf_len;
	unsigned int block_size = geo->ecc_chunk_size;

	this->bch_flashlayout0 =
		BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
		BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
		BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
		BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);

	this->bch_flashlayout1 =
		BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
		BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
		BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
}
1446
/* Page read with hardware (BCH) ECC. Returns max bitflips or -errno. */
static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
			      int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct bch_geometry *geo = &this->bch_geometry;
	unsigned int max_bitflips;
	int ret;

	/* Use the standard full-page BCH layout for this transfer. */
	gpmi_bch_layout_std(this);
	this->bch = true;

	ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
	if (ret)
		return ret;

	max_bitflips = gpmi_count_bitflips(chip, buf, 0,
					   geo->ecc_chunk_count,
					   geo->auxiliary_status_offset);

	/* Handle block mark swapping. */
	block_mark_swapping(this, buf, this->auxiliary_virt);

	if (oob_required) {
		/*
		 * Deliver the OOB bytes: fill the caller's buffer with set
		 * bits and copy only the block mark. If block mark swapping
		 * was needed, it has already been done above, so the first
		 * byte of the auxiliary buffer holds the block mark.
		 */
		memset(chip->oob_poi, ~0, mtd->oobsize);
		chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
	}

	return max_bitflips;
}
1487
1488
1489static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1490 uint32_t len, uint8_t *buf, int page)
1491{
1492 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1493 struct bch_geometry *geo = &this->bch_geometry;
1494 int size = chip->ecc.size;
1495 int meta, n, page_size;
1496 unsigned int max_bitflips;
1497 unsigned int ecc_strength;
1498 int first, last, marker_pos;
1499 int ecc_parity_size;
1500 int col = 0;
1501 int ret;
1502
1503
1504 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1505
1506
1507 first = offs / size;
1508 last = (offs + len - 1) / size;
1509
1510 if (this->swap_block_mark) {
1511
1512
1513
1514
1515
1516
1517
1518 marker_pos = geo->block_mark_byte_offset / size;
1519 if (last >= marker_pos && first <= marker_pos) {
1520 dev_dbg(this->dev,
1521 "page:%d, first:%d, last:%d, marker at:%d\n",
1522 page, first, last, marker_pos);
1523 return gpmi_ecc_read_page(chip, buf, 0, page);
1524 }
1525 }
1526
1527 meta = geo->metadata_size;
1528 if (first) {
1529 col = meta + (size + ecc_parity_size) * first;
1530 meta = 0;
1531 buf = buf + first * size;
1532 }
1533
1534 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1535
1536 n = last - first + 1;
1537 page_size = meta + (size + ecc_parity_size) * n;
1538 ecc_strength = geo->ecc_strength >> 1;
1539
1540 this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1541 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1542 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1543 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1544 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1545
1546 this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1547 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1548 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1549 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1550
1551 this->bch = true;
1552
1553 ret = nand_read_page_op(chip, page, col, buf, page_size);
1554 if (ret)
1555 return ret;
1556
1557 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1558 page, offs, len, col, first, n, page_size);
1559
1560 max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1561
1562 return max_bitflips;
1563}
1564
1565static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1566 int oob_required, int page)
1567{
1568 struct mtd_info *mtd = nand_to_mtd(chip);
1569 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1570 struct bch_geometry *nfc_geo = &this->bch_geometry;
1571 int ret;
1572
1573 dev_dbg(this->dev, "ecc write page.\n");
1574
1575 gpmi_bch_layout_std(this);
1576 this->bch = true;
1577
1578 memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1579
1580 if (this->swap_block_mark) {
1581
1582
1583
1584
1585 memcpy(this->data_buffer_dma, buf, mtd->writesize);
1586 buf = this->data_buffer_dma;
1587 block_mark_swapping(this, this->data_buffer_dma,
1588 this->auxiliary_virt);
1589 }
1590
1591 ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1592
1593 return ret;
1594}
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1657{
1658 struct mtd_info *mtd = nand_to_mtd(chip);
1659 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1660 int ret;
1661
1662
1663 memset(chip->oob_poi, ~0, mtd->oobsize);
1664
1665
1666 ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1667 mtd->oobsize);
1668 if (ret)
1669 return ret;
1670
1671
1672
1673
1674
1675
1676 if (GPMI_IS_MX23(this)) {
1677
1678 ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1679 if (ret)
1680 return ret;
1681 }
1682
1683 return 0;
1684}
1685
1686static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1687{
1688 struct mtd_info *mtd = nand_to_mtd(chip);
1689 struct mtd_oob_region of = { };
1690
1691
1692 mtd_ooblayout_free(mtd, 0, &of);
1693 if (!of.length)
1694 return -EPERM;
1695
1696 if (!nand_is_slc(chip))
1697 return -EPERM;
1698
1699 return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1700 chip->oob_poi + of.offset, of.length);
1701}
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
/*
 * Raw page read: fetch the page exactly as stored on flash (metadata,
 * then data chunks bit-interleaved with their BCH parity) and
 * de-interleave it into @buf and chip->oob_poi.
 */
static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	size_t src_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	uint8_t *oob = chip->oob_poi;
	int step;
	int ret;

	/* Pull data + OOB in one go into the raw scratch buffer. */
	ret = nand_read_page_op(chip, page, 0, tmp_buf,
				mtd->writesize + mtd->oobsize);
	if (ret)
		return ret;

	/*
	 * With block-mark swapping active, the on-flash image has the
	 * metadata byte and the bad-block marker exchanged; undo that
	 * before de-interleaving.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	/* The metadata occupies the first bytes and maps to the OOB start. */
	if (oob_required)
		memcpy(oob, tmp_buf, nfc_geo->metadata_size);

	oob_bit_off = nfc_geo->metadata_size * 8;
	src_bit_off = oob_bit_off;

	/* Split each (data chunk, ECC parity) pair at bit granularity. */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			gpmi_copy_bits(buf, step * eccsize * 8,
				       tmp_buf, src_bit_off,
				       eccsize * 8);
		src_bit_off += eccsize * 8;

		/* Round the last chunk's ECC bits up to a byte boundary. */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			gpmi_copy_bits(oob, oob_bit_off,
				       tmp_buf, src_bit_off,
				       eccbits);

		src_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	/* Copy any trailing OOB bytes past the interleaved region. */
	if (oob_required) {
		oob_byte_off = oob_bit_off / 8;

		if (oob_byte_off < mtd->oobsize)
			memcpy(oob + oob_byte_off,
			       tmp_buf + mtd->writesize + oob_byte_off,
			       mtd->oobsize - oob_byte_off);
	}

	return 0;
}
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
/*
 * Raw page write: interleave @buf and chip->oob_poi into the on-flash
 * layout (metadata, then data chunks bit-interleaved with ECC bits)
 * and program the whole page including OOB in a single operation.
 */
static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	uint8_t *oob = chip->oob_poi;
	size_t dst_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	int step;

	/*
	 * When data or OOB content is not supplied, prefill the scratch
	 * buffer with 0xff so untouched regions stay erased-looking.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);

	/* The metadata always comes from the start of the OOB buffer. */
	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
	oob_bit_off = nfc_geo->metadata_size * 8;
	dst_bit_off = oob_bit_off;

	/* Interleave each data chunk with its ECC bits. */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			gpmi_copy_bits(tmp_buf, dst_bit_off,
				       buf, step * eccsize * 8, eccsize * 8);
		dst_bit_off += eccsize * 8;

		/* Round the last chunk's ECC bits up to a byte boundary. */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			gpmi_copy_bits(tmp_buf, dst_bit_off,
				       oob, oob_bit_off, eccbits);

		dst_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	oob_byte_off = oob_bit_off / 8;

	/* Append trailing OOB bytes beyond the interleaved region. */
	if (oob_required && oob_byte_off < mtd->oobsize)
		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);

	/*
	 * With block-mark swapping active, exchange the metadata byte
	 * and the bad-block marker so the on-flash image matches what
	 * the ECC read path (and the boot ROM) expects.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	return nand_prog_page_op(chip, page, 0, tmp_buf,
				 mtd->writesize + mtd->oobsize);
}
1872
/* Raw OOB read: delegate to the raw page reader with no data buffer. */
static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
}
1877
/* Raw OOB write: delegate to the raw page writer with no data buffer. */
static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
}
1882
1883static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
1884{
1885 struct mtd_info *mtd = nand_to_mtd(chip);
1886 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1887 int ret = 0;
1888 uint8_t *block_mark;
1889 int column, page, chipnr;
1890
1891 chipnr = (int)(ofs >> chip->chip_shift);
1892 nand_select_target(chip, chipnr);
1893
1894 column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1895
1896
1897 block_mark = this->data_buffer_dma;
1898 block_mark[0] = 0;
1899
1900
1901 page = (int)(ofs >> chip->page_shift);
1902
1903 ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1904
1905 nand_deselect_target(chip);
1906
1907 return ret;
1908}
1909
/*
 * Record the boot ROM search-area geometry used by the transcription
 * code: a stride of 64 pages and 2^2 = 4 strides per search area.
 */
static int nand_boot_set_geometry(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *geometry = &this->rom_geometry;

	/*
	 * Boot block stride size, in pages.
	 *
	 * NOTE(review): the ROM presumably reads this from OTP bits we
	 * have no access to, so the hard-coded default is used -- confirm
	 * it matches the fused value on your part.
	 */
	geometry->stride_size_in_pages = 64;

	/*
	 * Search area stride exponent (search area = 2^n strides).
	 *
	 * NOTE(review): same caveat as above -- this is the assumed ROM
	 * default, not a value read from the hardware.
	 */
	geometry->search_area_stride_exponent = 2;
	return 0;
}
1935
/* Marker written at byte 12 of each stamped page to flag an NCB search area. */
static const char *fingerprint = "STMP";
1937static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1938{
1939 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1940 struct device *dev = this->dev;
1941 struct nand_chip *chip = &this->nand;
1942 unsigned int search_area_size_in_strides;
1943 unsigned int stride;
1944 unsigned int page;
1945 u8 *buffer = nand_get_data_buf(chip);
1946 int found_an_ncb_fingerprint = false;
1947 int ret;
1948
1949
1950 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1951
1952 nand_select_target(chip, 0);
1953
1954
1955
1956
1957 dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1958
1959 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1960
1961 page = stride * rom_geo->stride_size_in_pages;
1962
1963 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1964
1965
1966
1967
1968
1969 ret = nand_read_page_op(chip, page, 12, buffer,
1970 strlen(fingerprint));
1971 if (ret)
1972 continue;
1973
1974
1975 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1976 found_an_ncb_fingerprint = true;
1977 break;
1978 }
1979
1980 }
1981
1982 nand_deselect_target(chip);
1983
1984 if (found_an_ncb_fingerprint)
1985 dev_dbg(dev, "\tFound a fingerprint\n");
1986 else
1987 dev_dbg(dev, "\tNo fingerprint found\n");
1988 return found_an_ncb_fingerprint;
1989}
1990
1991
/*
 * Erase the boot ROM search area and write the "STMP" fingerprint into
 * the first page of every stride, so future boots know the bad-block
 * marks have already been transcribed.  Always returns 0; individual
 * erase/write failures are only logged.
 */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int status;

	/* Compute the search-area extent in pages and whole blocks. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	search_area_size_in_blocks =
		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);

	nand_select_target(chip, 0);

	/* Erase the entire search area (best effort, failures logged). */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		status = nand_erase_op(chip, block);
		if (status)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Build a page image: all 0xff with the fingerprint at byte 12. */
	memset(buffer, ~0, mtd->writesize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Write the fingerprint into the first page of every stride. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* The fingerprint goes in the first page of the stride. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride (raw, no ECC). */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);

		status = chip->ecc.write_page_raw(chip, buffer, 0, page);
		if (status)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	nand_deselect_target(chip);

	return 0;
}
2057
/*
 * One-time i.MX23 setup: transcribe the factory bad-block marks (which
 * the boot ROM's layout puts inside the data area) into the bad block
 * table, then stamp the search area so this is never repeated.
 * Always returns 0; per-block failures are logged and skipped.
 */
static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_count;
	unsigned int block;
	int chipnr;
	int page;
	loff_t byte;
	uint8_t block_mark;
	int ret = 0;

	/*
	 * If the stamp is already present, transcription has been done
	 * on a previous boot -- nothing to do.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * First boot on this medium: walk every block, read its factory
	 * marker, and record bad blocks via the block_markbad hook.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Total erase blocks on the target. */
	block_count = nanddev_eraseblocks_per_target(&chip->base);

	/*
	 * Loop over all the blocks in the medium, transcribing marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this
		 * block's first page.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block << chip->phys_erase_shift;

		/* Read the first OOB byte (the factory bad-block marker). */
		nand_select_target(chip, chipnr);
		ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
					1);
		nand_deselect_target(chip);

		if (ret)
			continue;

		/*
		 * Anything other than 0xff means the factory marked this
		 * block bad; transcribe the mark.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->legacy.block_markbad(chip, byte);
			if (ret)
				dev_err(dev,
					"Failed to mark block bad with ret %d\n",
					ret);
		}
	}

	/* Stamp the search area so transcription is never redone. */
	mx23_write_transcription_stamp(this);
	return 0;
}
2130
/*
 * Configure the boot ROM geometry and, on i.MX23 only, run the
 * bad-block-mark transcription pass.
 */
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	/* Only the MX23 boot ROM requires transcription work. */
	return GPMI_IS_MX23(this) ? mx23_boot_init(this) : 0;
}
2140
2141static int gpmi_set_geometry(struct gpmi_nand_data *this)
2142{
2143 int ret;
2144
2145
2146 gpmi_free_dma_buffer(this);
2147
2148
2149 ret = bch_set_geometry(this);
2150 if (ret) {
2151 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2152 return ret;
2153 }
2154
2155
2156 return gpmi_alloc_dma_buffer(this);
2157}
2158
/*
 * Final per-chip initialization: compute the BCH geometry, size the
 * DMA buffers, and install the hardware-ECC operations.  Subpage reads
 * are enabled on i.MX6 when the per-chunk ECC bit count is a whole
 * number of bytes.
 */
static int gpmi_init_last(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	int ret;

	/* Set up the medium geometry (BCH layout + DMA buffers). */
	ret = gpmi_set_geometry(this);
	if (ret)
		return ret;

	/* Hook in the hardware-ECC page/OOB operations. */
	ecc->read_page	= gpmi_ecc_read_page;
	ecc->write_page	= gpmi_ecc_write_page;
	ecc->read_oob	= gpmi_ecc_read_oob;
	ecc->write_oob	= gpmi_ecc_write_oob;
	ecc->read_page_raw = gpmi_ecc_read_page_raw;
	ecc->write_page_raw = gpmi_ecc_write_page_raw;
	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
	ecc->mode	= NAND_ECC_HW;
	ecc->size	= bch_geo->ecc_chunk_size;
	ecc->strength	= bch_geo->ecc_strength;
	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);

	/*
	 * Subpage reads need the ECC area of each chunk to end on a byte
	 * boundary, and are only wired up for i.MX6 here.
	 */
	if (GPMI_IS_MX6(this) &&
		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
		ecc->read_subpage = gpmi_ecc_read_subpage;
		chip->options |= NAND_SUBPAGE_READ;
	}

	return 0;
}
2199
2200static int gpmi_nand_attach_chip(struct nand_chip *chip)
2201{
2202 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2203 int ret;
2204
2205 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2206 chip->bbt_options |= NAND_BBT_NO_OOB;
2207
2208 if (of_property_read_bool(this->dev->of_node,
2209 "fsl,no-blockmark-swap"))
2210 this->swap_block_mark = false;
2211 }
2212 dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2213 this->swap_block_mark ? "en" : "dis");
2214
2215 ret = gpmi_init_last(this);
2216 if (ret)
2217 return ret;
2218
2219 chip->options |= NAND_SKIP_BBTSCAN;
2220
2221 return 0;
2222}
2223
/*
 * Hand out the next free slot in this->transfers[], or NULL when the
 * table is exhausted.
 *
 * NOTE(review): the counter is incremented before the limit check, so
 * the final array slot is taken but never returned -- looks like a
 * deliberate sentinel (the exec_op unmap loop still walks all counted
 * slots safely), but worth confirming.
 */
static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
{
	struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];

	this->ntransfers++;

	if (this->ntransfers == GPMI_MAX_TRANSFERS)
		return NULL;

	return transfer;
}
2235
/*
 * Chain one command cycle plus up to @naddr address cycles: a PIO
 * descriptor programs GPMI CTRL0 for a CLE write of naddr + 1 bytes
 * (ADDRESS_INCREMENT moves on to ALE after the opcode), then a
 * slave_sg descriptor feeds the opcode + address bytes.  Returns the
 * final descriptor or NULL on failure.
 */
static struct dma_async_tx_descriptor *gpmi_chain_command(
	struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	int chip = this->nand.cur_cs;
	u32 pio[3];

	/* [1] send out the PIO words: CLE write, naddr + 1 bytes total. */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
	pio[1] = 0;
	pio[2] = 0;
	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, 0);
	if (!desc)
		return NULL;

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	/* Stage opcode followed by the address bytes in the slot buffer. */
	transfer->cmdbuf[0] = cmd;
	if (naddr)
		memcpy(&transfer->cmdbuf[1], addr, naddr);

	sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
	dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);

	transfer->direction = DMA_TO_DEVICE;

	desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
				       MXS_DMA_CTRL_WAIT4END);
	return desc;
}
2277
2278static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2279 struct gpmi_nand_data *this)
2280{
2281 struct dma_chan *channel = get_dma_chan(this);
2282 u32 pio[2];
2283
2284 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2285 | BM_GPMI_CTRL0_WORD_LENGTH
2286 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2287 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2288 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2289 | BF_GPMI_CTRL0_XFER_COUNT(0);
2290 pio[1] = 0;
2291
2292 return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2293 MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2294}
2295
/*
 * Chain a data-read transfer of @raw_len bytes.  With BCH decoding
 * active, the extra PIO words point the ECC engine at the payload and
 * auxiliary buffers and no separate slave_sg descriptor is issued;
 * otherwise a plain DMA_DEV_TO_MEM transfer moves the bytes.
 * *@direct reports whether @buf was mapped directly (true) or the
 * driver's bounce buffer is in use (false, caller copies afterwards).
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_read(
	struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_FROM_DEVICE;

	/* Map the caller's buffer directly, or fall back to the bounce buffer. */
	*direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
				   DMA_FROM_DEVICE);

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	if (this->bch) {
		/* Route the data through the BCH decoder. */
		pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
				| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, 0);
	if (!desc)
		return NULL;

	/* Without BCH, add a plain slave_sg transfer to move the bytes. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
					       DMA_DEV_TO_MEM,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}
2342
/*
 * Chain a data-write transfer of @raw_len bytes, the mirror image of
 * gpmi_chain_data_read(): with BCH active the extra PIO words hand the
 * payload and auxiliary buffers to the encoder; otherwise a plain
 * DMA_MEM_TO_DEV slave_sg transfer feeds the bytes.
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_write(
	struct gpmi_nand_data *this, const void *buf, int raw_len)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_TO_DEVICE;

	/* Map the caller's buffer (or bounce it) for device access. */
	prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	if (this->bch) {
		/* Route the data through the BCH encoder. */
		pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
					BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE,
				      (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
	if (!desc)
		return NULL;

	/* Without BCH, add a plain slave_sg transfer to feed the bytes. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
					       DMA_MEM_TO_DEV,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}
2389
2390static int gpmi_nfc_exec_op(struct nand_chip *chip,
2391 const struct nand_operation *op,
2392 bool check_only)
2393{
2394 const struct nand_op_instr *instr;
2395 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2396 struct dma_async_tx_descriptor *desc = NULL;
2397 int i, ret, buf_len = 0, nbufs = 0;
2398 u8 cmd = 0;
2399 void *buf_read = NULL;
2400 const void *buf_write = NULL;
2401 bool direct = false;
2402 struct completion *completion;
2403 unsigned long to;
2404
2405 this->ntransfers = 0;
2406 for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2407 this->transfers[i].direction = DMA_NONE;
2408
2409 ret = pm_runtime_get_sync(this->dev);
2410 if (ret < 0)
2411 return ret;
2412
2413
2414
2415
2416
2417
2418
2419 if (this->hw.must_apply_timings) {
2420 this->hw.must_apply_timings = false;
2421 gpmi_nfc_apply_timings(this);
2422 }
2423
2424 dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2425
2426 for (i = 0; i < op->ninstrs; i++) {
2427 instr = &op->instrs[i];
2428
2429 nand_op_trace(" ", instr);
2430
2431 switch (instr->type) {
2432 case NAND_OP_WAITRDY_INSTR:
2433 desc = gpmi_chain_wait_ready(this);
2434 break;
2435 case NAND_OP_CMD_INSTR:
2436 cmd = instr->ctx.cmd.opcode;
2437
2438
2439
2440
2441
2442 if (i + 1 != op->ninstrs &&
2443 op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2444 continue;
2445
2446 desc = gpmi_chain_command(this, cmd, NULL, 0);
2447
2448 break;
2449 case NAND_OP_ADDR_INSTR:
2450 desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2451 instr->ctx.addr.naddrs);
2452 break;
2453 case NAND_OP_DATA_OUT_INSTR:
2454 buf_write = instr->ctx.data.buf.out;
2455 buf_len = instr->ctx.data.len;
2456 nbufs++;
2457
2458 desc = gpmi_chain_data_write(this, buf_write, buf_len);
2459
2460 break;
2461 case NAND_OP_DATA_IN_INSTR:
2462 if (!instr->ctx.data.len)
2463 break;
2464 buf_read = instr->ctx.data.buf.in;
2465 buf_len = instr->ctx.data.len;
2466 nbufs++;
2467
2468 desc = gpmi_chain_data_read(this, buf_read, buf_len,
2469 &direct);
2470 break;
2471 }
2472
2473 if (!desc) {
2474 ret = -ENXIO;
2475 goto unmap;
2476 }
2477 }
2478
2479 dev_dbg(this->dev, "%s setup done\n", __func__);
2480
2481 if (nbufs > 1) {
2482 dev_err(this->dev, "Multiple data instructions not supported\n");
2483 ret = -EINVAL;
2484 goto unmap;
2485 }
2486
2487 if (this->bch) {
2488 writel(this->bch_flashlayout0,
2489 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2490 writel(this->bch_flashlayout1,
2491 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2492 }
2493
2494 if (this->bch && buf_read) {
2495 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2496 this->resources.bch_regs + HW_BCH_CTRL_SET);
2497 completion = &this->bch_done;
2498 } else {
2499 desc->callback = dma_irq_callback;
2500 desc->callback_param = this;
2501 completion = &this->dma_done;
2502 }
2503
2504 init_completion(completion);
2505
2506 dmaengine_submit(desc);
2507 dma_async_issue_pending(get_dma_chan(this));
2508
2509 to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
2510 if (!to) {
2511 dev_err(this->dev, "DMA timeout, last DMA\n");
2512 gpmi_dump_info(this);
2513 ret = -ETIMEDOUT;
2514 goto unmap;
2515 }
2516
2517 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2518 this->resources.bch_regs + HW_BCH_CTRL_CLR);
2519 gpmi_clear_bch(this);
2520
2521 ret = 0;
2522
2523unmap:
2524 for (i = 0; i < this->ntransfers; i++) {
2525 struct gpmi_transfer *transfer = &this->transfers[i];
2526
2527 if (transfer->direction != DMA_NONE)
2528 dma_unmap_sg(this->dev, &transfer->sgl, 1,
2529 transfer->direction);
2530 }
2531
2532 if (!ret && buf_read && !direct)
2533 memcpy(buf_read, this->data_buffer_dma,
2534 gpmi_raw_len_to_len(this, buf_len));
2535
2536 this->bch = false;
2537
2538 pm_runtime_mark_last_busy(this->dev);
2539 pm_runtime_put_autosuspend(this->dev);
2540
2541 return ret;
2542}
2543
/* Hooks the NAND core uses to drive this controller. */
static const struct nand_controller_ops gpmi_nand_controller_ops = {
	.attach_chip = gpmi_nand_attach_chip,
	.setup_data_interface = gpmi_setup_data_interface,
	.exec_op = gpmi_nfc_exec_op,
};
2549
/*
 * Register the NAND chip with the MTD core: set up the chip and
 * controller structures, scan the bus, run the boot ROM transcription
 * work, build the BBT and register the MTD device.  Returns 0 or a
 * negative errno (cleaning up on every error path).
 */
static int gpmi_nand_init(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* MTD identity. */
	mtd->name		= "gpmi-nand";
	mtd->dev.parent		= this->dev;

	/* Chip hooks and options. */
	nand_set_controller_data(chip, this);
	nand_set_flash_node(chip, this->pdev->dev.of_node);
	chip->legacy.block_markbad = gpmi_block_markbad;
	chip->badblock_pattern	= &gpmi_bbt_descr;
	chip->options		|= NAND_NO_SUBPAGE_WRITE;

	/* MX23 keeps the bad-block marker in the data area -- no swap. */
	this->swap_block_mark = !GPMI_IS_MX23(this);

	/*
	 * Allocate a provisional DMA buffer large enough for the ID
	 * scan; the real geometry-sized buffers are allocated later in
	 * gpmi_init_last() via gpmi_set_geometry().
	 */
	this->bch_geometry.payload_size = 1024;
	this->bch_geometry.auxiliary_size = 128;
	ret = gpmi_alloc_dma_buffer(this);
	if (ret)
		goto err_out;

	nand_controller_init(&this->base);
	this->base.ops = &gpmi_nand_controller_ops;
	chip->controller = &this->base;

	/* i.MX6 boards may wire two chip selects. */
	ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
	if (ret)
		goto err_out;

	ret = nand_boot_init(this);
	if (ret)
		goto err_nand_cleanup;
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nand_cleanup;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_nand_cleanup;
	return 0;

err_nand_cleanup:
	nand_cleanup(chip);
err_out:
	gpmi_free_dma_buffer(this);
	return ret;
}
2606
/* Supported SoCs; .data selects the matching per-SoC quirk table. */
static const struct of_device_id gpmi_nand_id_table[] = {
	{
		.compatible = "fsl,imx23-gpmi-nand",
		.data = &gpmi_devdata_imx23,
	}, {
		.compatible = "fsl,imx28-gpmi-nand",
		.data = &gpmi_devdata_imx28,
	}, {
		.compatible = "fsl,imx6q-gpmi-nand",
		.data = &gpmi_devdata_imx6q,
	}, {
		.compatible = "fsl,imx6sx-gpmi-nand",
		.data = &gpmi_devdata_imx6sx,
	}, {
		.compatible = "fsl,imx7d-gpmi-nand",
		.data = &gpmi_devdata_imx7d,
	}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2626
/*
 * Platform probe: match the SoC variant, acquire clocks/regs/IRQs/DMA,
 * enable runtime PM (500 ms autosuspend), initialize the controller
 * and register the NAND chip.
 *
 * NOTE(review): when __gpmi_enable_clk() fails, the error path runs
 * pm_runtime_put()/pm_runtime_disable() before runtime PM was ever
 * enabled or referenced -- confirm this imbalance is harmless here.
 */
static int gpmi_nand_probe(struct platform_device *pdev)
{
	struct gpmi_nand_data *this;
	const struct of_device_id *of_id;
	int ret;

	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	/* Pick up the per-SoC data from the OF match. */
	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
	if (of_id) {
		this->devdata = of_id->data;
	} else {
		dev_err(&pdev->dev, "Failed to find the right device id.\n");
		return -ENODEV;
	}

	platform_set_drvdata(pdev, this);
	this->pdev  = pdev;
	this->dev   = &pdev->dev;

	/* Registers, clocks, interrupts and DMA channels. */
	ret = acquire_resources(this);
	if (ret)
		goto exit_acquire_resources;

	ret = __gpmi_enable_clk(this, true);
	if (ret)
		goto exit_nfc_init;

	/* Runtime PM with a 500 ms autosuspend window. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = gpmi_init(this);
	if (ret)
		goto exit_nfc_init;

	ret = gpmi_nand_init(this);
	if (ret)
		goto exit_nfc_init;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(this->dev, "driver registered.\n");

	return 0;

exit_nfc_init:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	release_resources(this);
exit_acquire_resources:

	return ret;
}
2686
/* Platform remove: tear down PM, the NAND chip and all resources. */
static int gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);

	/* Drop the runtime-PM reference and disable runtime PM. */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	nand_release(&this->nand);
	gpmi_free_dma_buffer(this);
	release_resources(this);
	return 0;
}
2699
2700#ifdef CONFIG_PM_SLEEP
/* System suspend: release the DMA channels; resume reacquires them. */
static int gpmi_pm_suspend(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	release_dma_channels(this);
	return 0;
}
2708
/*
 * System resume: reacquire the DMA channels and reprogram the GPMI and
 * BCH blocks, whose register state was lost across suspend.
 */
static int gpmi_pm_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);
	int ret;

	ret = acquire_dma_channels(this);
	if (ret < 0)
		return ret;

	/* Re-initialize the GPMI block. */
	ret = gpmi_init(this);
	if (ret) {
		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
		return ret;
	}

	/* Re-program the BCH geometry registers. */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH : %d\n", ret);
		return ret;
	}

	return 0;
}
2734#endif
2735
/* Runtime suspend: gate the controller clocks. */
static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	return __gpmi_enable_clk(this, false);
}
2742
/* Runtime resume: ungate the controller clocks. */
static int __maybe_unused gpmi_runtime_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	return __gpmi_enable_clk(this, true);
}
2749
/* System sleep and runtime PM callbacks for the platform driver. */
static const struct dev_pm_ops gpmi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
	SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
};
2754
/* Platform driver glue; matched against gpmi_nand_id_table via DT. */
static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		.pm = &gpmi_pm_ops,
		.of_match_table = gpmi_nand_id_table,
	},
	.probe   = gpmi_nand_probe,
	.remove  = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);
2765
2766MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2767MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2768MODULE_LICENSE("GPL");
2769