1
2
3
4
5
6
7
8#include <linux/clk.h>
9#include <linux/delay.h>
10#include <linux/slab.h>
11#include <linux/sched/task_stack.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
14#include <linux/mtd/partitions.h>
15#include <linux/of.h>
16#include <linux/of_device.h>
17#include <linux/pm_runtime.h>
18#include <linux/dma/mxs-dma.h>
19#include "gpmi-nand.h"
20#include "gpmi-regs.h"
21#include "bch-regs.h"
22
23
24#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
25#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
26#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
27
28
29#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
30
31#define MXS_SET_ADDR 0x4
32#define MXS_CLR_ADDR 0x8
33
34
35
36
37
38static int clear_poll_bit(void __iomem *addr, u32 mask)
39{
40 int timeout = 0x400;
41
42
43 writel(mask, addr + MXS_CLR_ADDR);
44
45
46
47
48
49 udelay(1);
50
51
52 while ((readl(addr) & mask) && --timeout)
53 ;
54
55 return !timeout;
56}
57
58#define MODULE_CLKGATE (1 << 30)
59#define MODULE_SFTRST (1 << 31)
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/*
 * Reset (or just enable) an MXS-style hardware block via its SFTRST/CLKGATE
 * control bits.
 *
 * @reset_addr:  base address of the block's control register
 * @just_enable: when true, skip the soft-reset pulse and only clear
 *               SFTRST/CLKGATE (used where a full reset must be avoided)
 *
 * Returns 0 on success, -ETIMEDOUT if any poll step times out.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
    int ret;
    int timeout = 0x400;

    /* Make sure SFTRST is deasserted before touching the block. */
    ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
    if (unlikely(ret))
        goto error;

    /* Ungate the block's clock. */
    writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

    if (!just_enable) {
        /* Assert the soft reset... */
        writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
        udelay(1);

        /*
         * ...and wait for CLKGATE to assert itself, which indicates the
         * reset has taken effect.
         */
        while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
            ;
        if (unlikely(!timeout))
            goto error;
    }

    /* Bring the block out of reset... */
    ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
    if (unlikely(ret))
        goto error;

    /* ...and ungate its clock again. */
    ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
    if (unlikely(ret))
        goto error;

    return 0;

error:
    pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
    return -ETIMEDOUT;
}
118
119static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
120{
121 struct clk *clk;
122 int ret;
123 int i;
124
125 for (i = 0; i < GPMI_CLK_MAX; i++) {
126 clk = this->resources.clock[i];
127 if (!clk)
128 break;
129
130 if (v) {
131 ret = clk_prepare_enable(clk);
132 if (ret)
133 goto err_clk;
134 } else {
135 clk_disable_unprepare(clk);
136 }
137 }
138 return 0;
139
140err_clk:
141 for (; i > 0; i--)
142 clk_disable_unprepare(this->resources.clock[i - 1]);
143 return ret;
144}
145
146static int gpmi_init(struct gpmi_nand_data *this)
147{
148 struct resources *r = &this->resources;
149 int ret;
150
151 ret = pm_runtime_get_sync(this->dev);
152 if (ret < 0)
153 return ret;
154
155 ret = gpmi_reset_block(r->gpmi_regs, false);
156 if (ret)
157 goto err_out;
158
159
160
161
162
163 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
164 if (ret)
165 goto err_out;
166
167
168 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
169
170
171 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
172 r->gpmi_regs + HW_GPMI_CTRL1_SET);
173
174
175 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
176
177
178 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
179
180
181
182
183
184 writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
185
186err_out:
187 pm_runtime_mark_last_busy(this->dev);
188 pm_runtime_put_autosuspend(this->dev);
189 return ret;
190}
191
192
/*
 * Debug helper: dump every GPMI and BCH register plus the computed BCH
 * geometry to the kernel log at error level.
 */
static void gpmi_dump_info(struct gpmi_nand_data *this)
{
    struct resources *r = &this->resources;
    struct bch_geometry *geo = &this->bch_geometry;
    u32 reg;
    int i;

    dev_err(this->dev, "Show GPMI registers :\n");
    /* Registers sit on 0x10-byte boundaries; dump up to HW_GPMI_DEBUG. */
    for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
        reg = readl(r->gpmi_regs + i * 0x10);
        dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
    }

    /* Same layout for the BCH block, up to HW_BCH_VERSION. */
    dev_err(this->dev, "Show BCH registers :\n");
    for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
        reg = readl(r->bch_regs + i * 0x10);
        dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
    }
    dev_err(this->dev, "BCH Geometry :\n"
        "GF length : %u\n"
        "ECC Strength : %u\n"
        "Page Size in Bytes : %u\n"
        "Metadata Size in Bytes : %u\n"
        "ECC Chunk Size in Bytes: %u\n"
        "ECC Chunk Count : %u\n"
        "Payload Size in Bytes : %u\n"
        "Auxiliary Size in Bytes: %u\n"
        "Auxiliary Status Offset: %u\n"
        "Block Mark Byte Offset : %u\n"
        "Block Mark Bit Offset : %u\n",
        geo->gf_len,
        geo->ecc_strength,
        geo->page_size,
        geo->metadata_size,
        geo->ecc_chunk_size,
        geo->ecc_chunk_count,
        geo->payload_size,
        geo->auxiliary_size,
        geo->auxiliary_status_offset,
        geo->block_mark_byte_offset,
        geo->block_mark_bit_offset);
}
236
237static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
238{
239 struct bch_geometry *geo = &this->bch_geometry;
240
241
242 if (GPMI_IS_MXS(this)) {
243
244 if (geo->gf_len == 14)
245 return false;
246 }
247 return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
248}
249
250
251
252
253
254
255
/*
 * Program the BCH geometry from an explicitly requested ECC strength and
 * step (chunk) size, instead of deriving it from the OOB size as the
 * legacy path does.
 *
 * Returns 0 on success or -EINVAL when the combination is unsupported.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
                unsigned int ecc_strength,
                unsigned int ecc_step)
{
    struct bch_geometry *geo = &this->bch_geometry;
    struct nand_chip *chip = &this->nand;
    struct mtd_info *mtd = nand_to_mtd(chip);
    unsigned int block_mark_bit_offset;

    /* The Galois-field width follows from the chunk size. */
    switch (ecc_step) {
    case SZ_512:
        geo->gf_len = 13;
        break;
    case SZ_1K:
        geo->gf_len = 14;
        break;
    default:
        dev_err(this->dev,
            "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
            nanddev_get_ecc_requirements(&chip->base)->strength,
            nanddev_get_ecc_requirements(&chip->base)->step_size);
        return -EINVAL;
    }
    geo->ecc_chunk_size = ecc_step;
    /* The BCH engine only takes even strengths (see gpmi_bch_layout_std). */
    geo->ecc_strength = round_up(ecc_strength, 2);
    if (!gpmi_check_ecc(this))
        return -EINVAL;

    /* The chunk size must be at least as large as the OOB area. */
    if (geo->ecc_chunk_size < mtd->oobsize) {
        dev_err(this->dev,
            "unsupported nand chip. ecc size: %d, oob size : %d\n",
            ecc_step, mtd->oobsize);
        return -EINVAL;
    }

    /* This driver always uses 10 bytes of metadata. */
    geo->metadata_size = 10;

    geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

    /*
     * The page as the BCH engine sees it: all data chunks, the metadata,
     * and every chunk's parity bits, converted to bytes.
     */
    geo->page_size = mtd->writesize + geo->metadata_size +
        (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

    geo->payload_size = mtd->writesize;

    /*
     * The auxiliary buffer holds the metadata followed by the per-chunk
     * status bytes; both parts are kept 4-byte aligned.
     */
    geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
    geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
        + ALIGN(geo->ecc_chunk_count, 4);

    if (!this->swap_block_mark)
        return 0;

    /*
     * Locate, in bit units, where the factory bad-block marker lands
     * inside the BCH-formatted page so it can be swapped with metadata
     * byte 0 (see block_mark_swapping()).
     */
    block_mark_bit_offset = mtd->writesize * 8 -
        (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
                + geo->metadata_size * 8);

    geo->block_mark_byte_offset = block_mark_bit_offset / 8;
    geo->block_mark_bit_offset = block_mark_bit_offset % 8;
    return 0;
}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385static inline int get_ecc_strength(struct gpmi_nand_data *this)
386{
387 struct bch_geometry *geo = &this->bch_geometry;
388 struct mtd_info *mtd = nand_to_mtd(&this->nand);
389 int ecc_strength;
390
391 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
392 / (geo->gf_len * geo->ecc_chunk_count);
393
394
395 return round_down(ecc_strength, 2);
396}
397
/*
 * Compute the BCH geometry the "legacy" way: fix the chunk size, then use
 * whatever ECC strength the OOB area can accommodate.
 *
 * Returns 0 on success, -EINVAL when the resulting strength exceeds what
 * the controller supports.
 */
static int legacy_set_geometry(struct gpmi_nand_data *this)
{
    struct bch_geometry *geo = &this->bch_geometry;
    struct mtd_info *mtd = nand_to_mtd(&this->nand);
    unsigned int metadata_size;
    unsigned int status_size;
    unsigned int block_mark_bit_offset;

    /* This driver always uses 10 bytes of metadata. */
    geo->metadata_size = 10;

    /* 13-bit Galois field for 512-byte chunks. */
    geo->gf_len = 13;

    /*
     * Start from 512-byte chunks and double until the chunk is no smaller
     * than the OOB area; larger chunks need a 14-bit field.
     */
    geo->ecc_chunk_size = 512;
    while (geo->ecc_chunk_size < mtd->oobsize) {
        geo->ecc_chunk_size *= 2;
        geo->gf_len = 14;
    }

    geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

    /* Use the strongest ECC the OOB space allows. */
    geo->ecc_strength = get_ecc_strength(this);
    if (!gpmi_check_ecc(this)) {
        dev_err(this->dev,
            "ecc strength: %d cannot be supported by the controller (%d)\n"
            "try to use minimum ecc strength that NAND chip required\n",
            geo->ecc_strength,
            this->devdata->bch_max_ecc_strength);
        return -EINVAL;
    }

    /* Page size as seen by the BCH engine: data + metadata + parity. */
    geo->page_size = mtd->writesize + geo->metadata_size +
        (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
    geo->payload_size = mtd->writesize;

    /*
     * Auxiliary buffer: metadata then per-chunk status bytes, both parts
     * 4-byte aligned.
     */
    metadata_size = ALIGN(geo->metadata_size, 4);
    status_size = ALIGN(geo->ecc_chunk_count, 4);

    geo->auxiliary_size = metadata_size + status_size;
    geo->auxiliary_status_offset = metadata_size;

    if (!this->swap_block_mark)
        return 0;

    /*
     * Locate the factory bad-block marker inside the BCH-formatted page
     * (in bits) so block_mark_swapping() can exchange it with metadata
     * byte 0.
     */
    block_mark_bit_offset = mtd->writesize * 8 -
        (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
                + geo->metadata_size * 8);

    geo->block_mark_byte_offset = block_mark_bit_offset / 8;
    geo->block_mark_bit_offset = block_mark_bit_offset % 8;
    return 0;
}
509
/*
 * Pick a BCH geometry, in order of preference:
 *  1. explicit ecc.strength/ecc.size (e.g. from the device tree),
 *  2. the legacy OOB-derived geometry (unless "fsl,use-minimum-ecc" is set),
 *  3. the NAND chip's own minimum ECC requirements.
 */
static int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
    struct nand_chip *chip = &this->nand;
    const struct nand_ecc_props *requirements =
        nanddev_get_ecc_requirements(&chip->base);

    if (chip->ecc.strength > 0 && chip->ecc.size > 0)
        return set_geometry_by_ecc_info(this, chip->ecc.strength,
                        chip->ecc.size);

    /* DT opt-in for minimum ECC, or legacy computation failed: fall back
     * to the chip's stated requirements (if it states any). */
    if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
                || legacy_set_geometry(this)) {
        if (!(requirements->strength > 0 && requirements->step_size > 0))
            return -EINVAL;

        return set_geometry_by_ecc_info(this,
                        requirements->strength,
                        requirements->step_size);
    }

    return 0;
}
532
533
/*
 * Compute the BCH geometry and prepare the BCH block for it: reset the
 * block and select layout 0 for all chips.
 *
 * Returns 0 on success or a negative error code.
 */
static int bch_set_geometry(struct gpmi_nand_data *this)
{
    struct resources *r = &this->resources;
    int ret;

    ret = common_nfc_set_geometry(this);
    if (ret)
        return ret;

    ret = pm_runtime_get_sync(this->dev);
    if (ret < 0) {
        /* get_sync bumps the usage count even on failure: drop it. */
        pm_runtime_put_autosuspend(this->dev);
        return ret;
    }

    /*
     * Reset the BCH block. On MX23/MX28 the full soft-reset pulse is
     * skipped (just_enable == true).
     */
    ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
    if (ret)
        goto err_out;

    /* Use layout 0 for every chip select. */
    writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);

    ret = 0;
err_out:
    pm_runtime_mark_last_busy(this->dev);
    pm_runtime_put_autosuspend(this->dev);

    return ret;
}
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
/*
 * Translate the negotiated SDR timings into GPMI clock rate and register
 * values. The results are stored in this->hw and applied later by
 * gpmi_nfc_apply_timings().
 */
static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
                     const struct nand_sdr_timings *sdr)
{
    struct gpmi_nfc_hardware_timing *hw = &this->hw;
    unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
    unsigned int period_ps, reference_period_ps;
    unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
    unsigned int tRP_ps;
    bool use_half_period;
    int sample_delay_ps, sample_delay_factor;
    u16 busy_timeout_cycles;
    u8 wrn_dly_sel;

    /* Pick a GPMI clock rate and write-delay setting from tRC. */
    if (sdr->tRC_min >= 30000) {
        /* Slow NAND (tRC >= 30 ns): 22 MHz, 4-8 ns WRN delay. */
        hw->clk_rate = 22000000;
        wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
    } else if (sdr->tRC_min >= 25000) {
        /* Mid-range (tRC >= 25 ns): 80 MHz, no extra delay. */
        hw->clk_rate = 80000000;
        wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
    } else {
        /* Fast NAND: 100 MHz, no extra delay. */
        hw->clk_rate = 100000000;
        wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
    }

    /* Clock period in picoseconds. */
    period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);

    /* Convert the required setup/hold/timeout times into clock cycles. */
    addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
    data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
    data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
    busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);

    hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
              BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
              BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
    /* NOTE(review): timing1 field appears to be in units of 4096 cycles —
     * confirm against the GPMI reference manual. */
    hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);

    /*
     * If the clock period is longer than the DLL can handle, run the DLL
     * off half the period instead.
     */
    if (period_ps > dll_threshold_ps) {
        use_half_period = true;
        reference_period_ps = period_ps / 2;
    } else {
        use_half_period = false;
        reference_period_ps = period_ps;
    }

    /* Read-data sample point, derived from tREA and the read pulse. */
    tRP_ps = data_setup_cycles * period_ps;
    sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
    if (sample_delay_ps > 0)
        sample_delay_factor = sample_delay_ps / reference_period_ps;
    else
        sample_delay_factor = 0;

    hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
    /* Only enable the DLL when a non-zero sample delay is needed. */
    if (sample_delay_factor)
        hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
                  BM_GPMI_CTRL1_DLL_ENABLE |
                  (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
}
711
/*
 * Write the timings previously computed by gpmi_nfc_compute_timings()
 * into the hardware, then wait for the DLL to lock.
 */
static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
    struct gpmi_nfc_hardware_timing *hw = &this->hw;
    struct resources *r = &this->resources;
    void __iomem *gpmi_regs = r->gpmi_regs;
    unsigned int dll_wait_time_us;

    /* Clock index 0 is the GPMI I/O reference clock. */
    clk_set_rate(r->clock[0], hw->clk_rate);

    writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
    writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);

    /*
     * Clear all the timing-related CTRL1 bits first, then set only the
     * ones this configuration needs.
     */
    writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
    writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);

    /* 64 clock cycles for the DLL to settle, rounded up to >= 1 us. */
    dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
    if (!dll_wait_time_us)
        dll_wait_time_us = 1;

    udelay(dll_wait_time_us);
}
739
740static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
741 const struct nand_interface_config *conf)
742{
743 struct gpmi_nand_data *this = nand_get_controller_data(chip);
744 const struct nand_sdr_timings *sdr;
745
746
747 sdr = nand_get_sdr_timings(conf);
748 if (IS_ERR(sdr))
749 return PTR_ERR(sdr);
750
751
752 if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
753 return -ENOTSUPP;
754
755
756 if (chipnr < 0)
757 return 0;
758
759
760 gpmi_nfc_compute_timings(this, sdr);
761
762 this->hw.must_apply_timings = true;
763
764 return 0;
765}
766
767
/* Acknowledge (clear) the BCH "complete" interrupt. */
static void gpmi_clear_bch(struct gpmi_nand_data *this)
{
    struct resources *r = &this->resources;
    writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
773
/*
 * All chips share DMA channel 0 — chip select is decoupled from the DMA
 * channel via BM_GPMI_CTRL1_DECOUPLE_CS in gpmi_init().
 */
static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
    return this->dma_chans[0];
}
779
780
781static void dma_irq_callback(void *param)
782{
783 struct gpmi_nand_data *this = param;
784 struct completion *dma_c = &this->dma_done;
785
786 complete(dma_c);
787}
788
/* BCH interrupt handler: ack the IRQ and signal completion. */
static irqreturn_t bch_irq(int irq, void *cookie)
{
    struct gpmi_nand_data *this = cookie;

    gpmi_clear_bch(this);
    complete(&this->bch_done);
    return IRQ_HANDLED;
}
797
798static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
799{
800
801
802
803
804 if (this->bch)
805 return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
806 else
807 return raw_len;
808}
809
810
/*
 * Set up the scatterlist for a data DMA transfer.
 *
 * Returns true when the caller's buffer was mapped directly; false when
 * the driver's bounce buffer (data_buffer_dma) was used instead — in that
 * case a read must be copied back out of the bounce buffer afterwards.
 */
static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
                 int raw_len, struct scatterlist *sgl,
                 enum dma_data_direction dr)
{
    int ret;
    int len = gpmi_raw_len_to_len(this, raw_len);

    /* Stack or vmalloc memory cannot be DMA-mapped directly. */
    if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
        sg_init_one(sgl, buf, len);
        ret = dma_map_sg(this->dev, sgl, 1, dr);
        if (ret == 0)
            goto map_fail;

        return true;
    }

map_fail:
    /* Fall back to the preallocated DMA-safe bounce buffer. */
    sg_init_one(sgl, this->data_buffer_dma, len);

    /* For writes, stage the data into the bounce buffer first. */
    if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
        memcpy(this->data_buffer_dma, buf, len);

    dma_map_sg(this->dev, sgl, 1, dr);

    return false;
}
839
840
/* Bad-block scan descriptor: a good block has 0xff in the first OOB byte. */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
    .options = 0,
    .offs = 0,
    .len = 1,
    .pattern = scan_ff_pattern
};
848
849
850
851
852
853static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
854 struct mtd_oob_region *oobregion)
855{
856 struct nand_chip *chip = mtd_to_nand(mtd);
857 struct gpmi_nand_data *this = nand_get_controller_data(chip);
858 struct bch_geometry *geo = &this->bch_geometry;
859
860 if (section)
861 return -ERANGE;
862
863 oobregion->offset = 0;
864 oobregion->length = geo->page_size - mtd->writesize;
865
866 return 0;
867}
868
869static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
870 struct mtd_oob_region *oobregion)
871{
872 struct nand_chip *chip = mtd_to_nand(mtd);
873 struct gpmi_nand_data *this = nand_get_controller_data(chip);
874 struct bch_geometry *geo = &this->bch_geometry;
875
876 if (section)
877 return -ERANGE;
878
879
880 if (geo->page_size < mtd->writesize + mtd->oobsize) {
881 oobregion->offset = geo->page_size - mtd->writesize;
882 oobregion->length = mtd->oobsize - oobregion->offset;
883 }
884
885 return 0;
886}
887
/* Clocks needed on MX23/MX28: only the GPMI I/O clock. */
static const char * const gpmi_clks_for_mx2x[] = {
    "gpmi_io",
};

static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
    .ecc = gpmi_ooblayout_ecc,
    .free = gpmi_ooblayout_free,
};
896
/* Per-SoC capabilities: BCH strength limit, chain delay, clock list. */
static const struct gpmi_devdata gpmi_devdata_imx23 = {
    .type = IS_MX23,
    .bch_max_ecc_strength = 20,
    .max_chain_delay = 16000,
    .clks = gpmi_clks_for_mx2x,
    .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const struct gpmi_devdata gpmi_devdata_imx28 = {
    .type = IS_MX28,
    .bch_max_ecc_strength = 20,
    .max_chain_delay = 16000,
    .clks = gpmi_clks_for_mx2x,
    .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

/* MX6-class parts need the full set of bus/engine clocks. */
static const char * const gpmi_clks_for_mx6[] = {
    "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};

static const struct gpmi_devdata gpmi_devdata_imx6q = {
    .type = IS_MX6Q,
    .bch_max_ecc_strength = 40,
    .max_chain_delay = 12000,
    .clks = gpmi_clks_for_mx6,
    .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const struct gpmi_devdata gpmi_devdata_imx6sx = {
    .type = IS_MX6SX,
    .bch_max_ecc_strength = 62,
    .max_chain_delay = 12000,
    .clks = gpmi_clks_for_mx6,
    .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const char * const gpmi_clks_for_mx7d[] = {
    "gpmi_io", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx7d = {
    .type = IS_MX7D,
    .bch_max_ecc_strength = 62,
    .max_chain_delay = 12000,
    .clks = gpmi_clks_for_mx7d,
    .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};
944
/*
 * Map the named MMIO register block and store the mapping in the GPMI or
 * BCH slot of the resources structure. The mapping is devm-managed.
 */
static int acquire_register_block(struct gpmi_nand_data *this,
                  const char *res_name)
{
    struct platform_device *pdev = this->pdev;
    struct resources *res = &this->resources;
    struct resource *r;
    void __iomem *p;

    r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
    p = devm_ioremap_resource(&pdev->dev, r);
    if (IS_ERR(p))
        return PTR_ERR(p);

    /* Route the mapping to the matching slot by resource name. */
    if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
        res->gpmi_regs = p;
    else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
        res->bch_regs = p;
    else
        dev_err(this->dev, "unknown resource name : %s\n", res_name);

    return 0;
}
967
968static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
969{
970 struct platform_device *pdev = this->pdev;
971 const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
972 struct resource *r;
973 int err;
974
975 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
976 if (!r) {
977 dev_err(this->dev, "Can't get resource for %s\n", res_name);
978 return -ENODEV;
979 }
980
981 err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
982 if (err)
983 dev_err(this->dev, "error requesting BCH IRQ\n");
984
985 return err;
986}
987
988static void release_dma_channels(struct gpmi_nand_data *this)
989{
990 unsigned int i;
991 for (i = 0; i < DMA_CHANS; i++)
992 if (this->dma_chans[i]) {
993 dma_release_channel(this->dma_chans[i]);
994 this->dma_chans[i] = NULL;
995 }
996}
997
/*
 * Request the single "rx-tx" DMA channel used for all transfers.
 * Returns 0 on success or a (possibly -EPROBE_DEFER-aware) error code.
 */
static int acquire_dma_channels(struct gpmi_nand_data *this)
{
    struct platform_device *pdev = this->pdev;
    struct dma_chan *dma_chan;
    int ret = 0;

    dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
    if (IS_ERR(dma_chan)) {
        /* dev_err_probe() stays quiet on -EPROBE_DEFER. */
        ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
                    "DMA channel request failed\n");
        release_dma_channels(this);
    } else {
        this->dma_chans[0] = dma_chan;
    }

    return ret;
}
1016
/*
 * Fetch all clocks listed in the per-SoC devdata table into the resources
 * structure (devm-managed lookups, no explicit put needed).
 */
static int gpmi_get_clks(struct gpmi_nand_data *this)
{
    struct resources *r = &this->resources;
    struct clk *clk;
    int err, i;

    for (i = 0; i < this->devdata->clks_count; i++) {
        clk = devm_clk_get(this->dev, this->devdata->clks[i]);
        if (IS_ERR(clk)) {
            err = PTR_ERR(clk);
            goto err_clock;
        }

        r->clock[i] = clk;
    }

    if (GPMI_IS_MX6(this))
        /*
         * Start the MX6 I/O reference clock at a conservative 22 MHz;
         * gpmi_nfc_compute_timings() raises it later once the chip's
         * timings are known.
         */
        clk_set_rate(r->clock[0], 22000000);

    return 0;

err_clock:
    dev_dbg(this->dev, "failed in finding the clocks.\n");
    return err;
}
1048
/*
 * Acquire everything the driver needs: both register blocks, the BCH IRQ,
 * the DMA channel, and the clocks. Registers and the IRQ are devm-managed;
 * only the DMA channel needs explicit rollback here.
 */
static int acquire_resources(struct gpmi_nand_data *this)
{
    int ret;

    ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
    if (ret)
        goto exit_regs;

    ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
    if (ret)
        goto exit_regs;

    ret = acquire_bch_irq(this, bch_irq);
    if (ret)
        goto exit_regs;

    ret = acquire_dma_channels(this);
    if (ret)
        goto exit_regs;

    ret = gpmi_get_clks(this);
    if (ret)
        goto exit_clock;
    return 0;

exit_clock:
    release_dma_channels(this);
exit_regs:
    return ret;
}
1079
/* Undo acquire_resources(); everything else there is devm-managed. */
static void release_resources(struct gpmi_nand_data *this)
{
    release_dma_channels(this);
}
1084
/*
 * Free the DMA buffers allocated by gpmi_alloc_dma_buffer(). Safe to call
 * on a partially allocated set (used as the error path there).
 */
static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
{
    struct device *dev = this->dev;
    struct bch_geometry *geo = &this->bch_geometry;

    if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
        dma_free_coherent(dev, geo->auxiliary_size,
                  this->auxiliary_virt,
                  this->auxiliary_phys);
    /* kfree(NULL) is a no-op, so no guards needed. */
    kfree(this->data_buffer_dma);
    kfree(this->raw_buffer);

    this->data_buffer_dma = NULL;
    this->raw_buffer = NULL;
}
1100
1101
/*
 * Allocate the driver's working buffers: a DMA-safe bounce buffer, the
 * coherent auxiliary (metadata + status) buffer, and a raw page buffer.
 *
 * Called both before and after nand_scan(); before the scan
 * mtd->writesize is still 0, hence the PAGE_SIZE fallback.
 *
 * Returns 0 on success, -ENOMEM on any failure (all buffers freed).
 */
static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
{
    struct bch_geometry *geo = &this->bch_geometry;
    struct device *dev = this->dev;
    struct mtd_info *mtd = nand_to_mtd(&this->nand);

    /* GFP_DMA: the bounce buffer must be addressable by the DMA engine. */
    this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
                    GFP_DMA | GFP_KERNEL);
    if (this->data_buffer_dma == NULL)
        goto error_alloc;

    this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
                        &this->auxiliary_phys, GFP_DMA);
    if (!this->auxiliary_virt)
        goto error_alloc;

    /* Raw buffer covers a full page plus its OOB area. */
    this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
    if (!this->raw_buffer)
        goto error_alloc;

    return 0;

error_alloc:
    gpmi_free_dma_buffer(this);
    return -ENOMEM;
}
1136
1137
1138
1139
1140
1141
/*
 * Swap the factory bad-block marker with metadata byte 0.
 *
 * The BCH page layout places ordinary payload data where the factory
 * bad-block marker lives in the physical page, so the marker position
 * (geo->block_mark_{byte,bit}_offset) and the first auxiliary byte are
 * exchanged on every read and write. The data-side byte generally
 * straddles two payload bytes, hence the bit shifting.
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
                void *payload, void *auxiliary)
{
    struct bch_geometry *nfc_geo = &this->bch_geometry;
    unsigned char *p;
    unsigned char *a;
    unsigned int bit;
    unsigned char mask;
    unsigned char from_data;
    unsigned char from_oob;

    if (!this->swap_block_mark)
        return;

    /* Locate the marker's byte/bit position inside the payload. */
    bit = nfc_geo->block_mark_bit_offset;
    p = payload + nfc_geo->block_mark_byte_offset;
    a = auxiliary;

    /* Assemble the byte that currently sits at the marker position,
     * spread across p[0] and p[1]. */
    from_data = (p[0] >> bit) | (p[1] << (8 - bit));

    /* The byte currently in the metadata slot. */
    from_oob = a[0];

    /* Put the payload byte into the metadata slot... */
    a[0] = from_data;

    /* ...and scatter the metadata byte across p[0]/p[1]. */
    mask = (0x1 << bit) - 1;
    p[0] = (p[0] & mask) | (from_oob << bit);

    mask = ~0 << bit;
    p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
1184
/*
 * Walk the per-chunk BCH status bytes for chunks [first, last) and tally
 * bitflips into the MTD ECC statistics.
 *
 * A chunk reported as uncorrectable may in fact be an erased (all-0xff)
 * chunk with a few bitflips, which the BCH engine cannot recognise; that
 * case is re-checked in software before counting a hard failure.
 *
 * Returns the maximum number of bitflips seen in any single chunk.
 */
static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
                   int last, int meta)
{
    struct gpmi_nand_data *this = nand_get_controller_data(chip);
    struct bch_geometry *nfc_geo = &this->bch_geometry;
    struct mtd_info *mtd = nand_to_mtd(chip);
    int i;
    unsigned char *status;
    unsigned int max_bitflips = 0;

    /* Status bytes follow the (4-byte aligned) metadata in the aux buf. */
    status = this->auxiliary_virt + ALIGN(meta, 4);

    for (i = first; i < last; i++, status++) {
        if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
            continue;

        if (*status == STATUS_UNCORRECTABLE) {
            int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
            u8 *eccbuf = this->raw_buffer;
            int offset, bitoffset;
            int eccbytes;
            int flips;

            /*
             * Re-read this chunk's parity bytes so the erased-page
             * check below can inspect them. The parity area is not
             * byte aligned, so the bit offset math is needed.
             */
            offset = nfc_geo->metadata_size * 8;
            offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
            offset -= eccbits;
            bitoffset = offset % 8;
            eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
            offset /= 8;
            eccbytes -= offset;
            nand_change_read_column_op(chip, offset, eccbuf,
                           eccbytes, false);

            /*
             * Mask out the bits in the first/last ECC bytes that do
             * not belong to this chunk, so they cannot be counted as
             * bitflips by the erased-chunk check.
             */
            if (bitoffset)
                eccbuf[0] |= GENMASK(bitoffset - 1, 0);

            bitoffset = (bitoffset + eccbits) % 8;
            if (bitoffset)
                eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);

            /*
             * Chunk 0 additionally protects the metadata, so include
             * it in the erased-chunk check for that chunk only.
             */
            if (i == 0) {
                flips = nand_check_erased_ecc_chunk(
                        buf + i * nfc_geo->ecc_chunk_size,
                        nfc_geo->ecc_chunk_size,
                        eccbuf, eccbytes,
                        this->auxiliary_virt,
                        nfc_geo->metadata_size,
                        nfc_geo->ecc_strength);
            } else {
                flips = nand_check_erased_ecc_chunk(
                        buf + i * nfc_geo->ecc_chunk_size,
                        nfc_geo->ecc_chunk_size,
                        eccbuf, eccbytes,
                        NULL, 0,
                        nfc_geo->ecc_strength);
            }

            /* Erased chunk with correctable flips: count and go on. */
            if (flips > 0) {
                max_bitflips = max_t(unsigned int, max_bitflips,
                             flips);
                mtd->ecc_stats.corrected += flips;
                continue;
            }

            /* Genuinely uncorrectable. */
            mtd->ecc_stats.failed++;
            continue;
        }

        /* Otherwise the status byte is the number of corrected flips. */
        mtd->ecc_stats.corrected += *status;
        max_bitflips = max_t(unsigned int, max_bitflips, *status);
    }

    return max_bitflips;
}
1280
/*
 * Fill in the standard (full-page) BCH flash layout register values from
 * the computed geometry. The ECC0/ECCN register fields encode half the
 * ECC strength, hence the >> 1.
 */
static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
{
    struct bch_geometry *geo = &this->bch_geometry;
    unsigned int ecc_strength = geo->ecc_strength >> 1;
    unsigned int gf_len = geo->gf_len;
    unsigned int block_size = geo->ecc_chunk_size;

    /* Layout0: chunk 0 (which also covers the metadata). */
    this->bch_flashlayout0 =
        BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
        BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
        BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
        BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
        BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);

    /* Layout1: the remaining chunks and the total BCH page size. */
    this->bch_flashlayout1 =
        BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
        BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
        BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
        BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
}
1301
/*
 * ecc.read_page hook: read a full page with hardware BCH correction.
 * Returns the maximum bitflip count (for MTD's bitflip threshold logic)
 * or a negative error code.
 */
static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
                  int oob_required, int page)
{
    struct gpmi_nand_data *this = nand_get_controller_data(chip);
    struct mtd_info *mtd = nand_to_mtd(chip);
    struct bch_geometry *geo = &this->bch_geometry;
    unsigned int max_bitflips;
    int ret;

    /* Use the full-page BCH layout and route the read through BCH. */
    gpmi_bch_layout_std(this);
    this->bch = true;

    ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
    if (ret)
        return ret;

    max_bitflips = gpmi_count_bitflips(chip, buf, 0,
                       geo->ecc_chunk_count,
                       geo->auxiliary_status_offset);

    /* Undo the bad-block marker swap done at write time. */
    block_mark_swapping(this, buf, this->auxiliary_virt);

    if (oob_required) {
        /*
         * The BCH view of the page exposes no raw OOB bytes, so
         * present an all-0xff OOB with only the (swapped) bad-block
         * marker byte filled in from the metadata.
         */
        memset(chip->oob_poi, ~0, mtd->oobsize);
        chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
    }

    return max_bitflips;
}
1342
1343
1344static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1345 uint32_t len, uint8_t *buf, int page)
1346{
1347 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1348 struct bch_geometry *geo = &this->bch_geometry;
1349 int size = chip->ecc.size;
1350 int meta, n, page_size;
1351 unsigned int max_bitflips;
1352 unsigned int ecc_strength;
1353 int first, last, marker_pos;
1354 int ecc_parity_size;
1355 int col = 0;
1356 int ret;
1357
1358
1359 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1360
1361
1362 first = offs / size;
1363 last = (offs + len - 1) / size;
1364
1365 if (this->swap_block_mark) {
1366
1367
1368
1369
1370
1371
1372
1373 marker_pos = geo->block_mark_byte_offset / size;
1374 if (last >= marker_pos && first <= marker_pos) {
1375 dev_dbg(this->dev,
1376 "page:%d, first:%d, last:%d, marker at:%d\n",
1377 page, first, last, marker_pos);
1378 return gpmi_ecc_read_page(chip, buf, 0, page);
1379 }
1380 }
1381
1382 meta = geo->metadata_size;
1383 if (first) {
1384 col = meta + (size + ecc_parity_size) * first;
1385 meta = 0;
1386 buf = buf + first * size;
1387 }
1388
1389 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1390
1391 n = last - first + 1;
1392 page_size = meta + (size + ecc_parity_size) * n;
1393 ecc_strength = geo->ecc_strength >> 1;
1394
1395 this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1396 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1397 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1398 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1399 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1400
1401 this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1402 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1403 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1404 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1405
1406 this->bch = true;
1407
1408 ret = nand_read_page_op(chip, page, col, buf, page_size);
1409 if (ret)
1410 return ret;
1411
1412 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1413 page, offs, len, col, first, n, page_size);
1414
1415 max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1416
1417 return max_bitflips;
1418}
1419
/*
 * ecc.write_page hook: program a full page through the BCH engine.
 * Returns 0 on success or a negative error code.
 */
static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
                   int oob_required, int page)
{
    struct mtd_info *mtd = nand_to_mtd(chip);
    struct gpmi_nand_data *this = nand_get_controller_data(chip);
    struct bch_geometry *nfc_geo = &this->bch_geometry;
    int ret;

    dev_dbg(this->dev, "ecc write page.\n");

    /* Full-page BCH layout, routed through the BCH engine. */
    gpmi_bch_layout_std(this);
    this->bch = true;

    /* The OOB data becomes the page's metadata/auxiliary area. */
    memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);

    if (this->swap_block_mark) {
        /*
         * The swap modifies the payload, so do it on a private copy
         * of the caller's (const) buffer.
         */
        memcpy(this->data_buffer_dma, buf, mtd->writesize);
        buf = this->data_buffer_dma;
        block_mark_swapping(this, this->data_buffer_dma,
                    this->auxiliary_virt);
    }

    ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);

    return ret;
}
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
/*
 * ecc.read_oob hook: fill chip->oob_poi with the physical OOB bytes.
 * Returns 0 on success or a negative error code.
 */
static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
{
    struct mtd_info *mtd = nand_to_mtd(chip);
    struct gpmi_nand_data *this = nand_get_controller_data(chip);
    int ret;

    /* Start from an all-0xff OOB. */
    memset(chip->oob_poi, ~0, mtd->oobsize);

    /* Read the raw OOB area that follows the data area. */
    ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
                mtd->oobsize);
    if (ret)
        return ret;

    /*
     * On the MX23 the bad-block marker lives in the first byte of the
     * data area rather than the OOB, so fetch that byte separately.
     */
    if (GPMI_IS_MX23(this)) {
        ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
        if (ret)
            return ret;
    }

    return 0;
}
1540
1541static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1542{
1543 struct mtd_info *mtd = nand_to_mtd(chip);
1544 struct mtd_oob_region of = { };
1545
1546
1547 mtd_ooblayout_free(mtd, 0, &of);
1548 if (!of.length)
1549 return -EPERM;
1550
1551 if (!nand_is_slc(chip))
1552 return -EPERM;
1553
1554 return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1555 chip->oob_poi + of.offset, of.length);
1556}
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
/*
 * Raw (no on-the-fly BCH) page read.
 *
 * On flash the page is laid out as: metadata, then alternating data
 * chunks and ECC fields, interleaved at *bit* granularity. The whole
 * page + OOB is therefore read into a scratch buffer and the data/ECC
 * bits are pulled out with nand_extract_bits().
 */
static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	size_t src_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	uint8_t *oob = chip->oob_poi;
	int step;
	int ret;

	/* Pull the entire page, including OOB, into the scratch buffer. */
	ret = nand_read_page_op(chip, page, 0, tmp_buf,
				mtd->writesize + mtd->oobsize);
	if (ret)
		return ret;

	/*
	 * If block-mark swapping is in effect, undo it so the caller sees
	 * the data exactly as it is organized on the flash.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	/* The metadata occupies the first bytes of the page. */
	if (oob_required)
		memcpy(oob, tmp_buf, nfc_geo->metadata_size);

	oob_bit_off = nfc_geo->metadata_size * 8;
	src_bit_off = oob_bit_off;

	/* Extract the interleaved payload data and ECC bits. */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			nand_extract_bits(buf, step * eccsize, tmp_buf,
					  src_bit_off, eccsize * 8);
		src_bit_off += eccsize * 8;

		/* Pad the last ECC field out to a byte boundary. */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			nand_extract_bits(oob, oob_bit_off, tmp_buf,
					  src_bit_off, eccbits);

		src_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	/* Bytes past the interleaved region are copied to the OOB verbatim. */
	if (oob_required) {
		oob_byte_off = oob_bit_off / 8;

		if (oob_byte_off < mtd->oobsize)
			memcpy(oob + oob_byte_off,
			       tmp_buf + mtd->writesize + oob_byte_off,
			       mtd->oobsize - oob_byte_off);
	}

	return 0;
}
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
/*
 * Raw (no on-the-fly BCH) page write — mirror of gpmi_ecc_read_page_raw().
 *
 * The scratch buffer is assembled into the on-flash layout (metadata,
 * then bit-interleaved data chunks and ECC fields) with
 * nand_extract_bits(), then programmed in one go.
 */
static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	int eccsize = nfc_geo->ecc_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	uint8_t *oob = chip->oob_poi;
	size_t dst_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	int step;

	/*
	 * If either the data or the OOB is missing, pre-fill the scratch
	 * buffer with 0xff so the untouched areas program as "erased".
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);

	/*
	 * The metadata at the start of the page is always taken from the
	 * OOB buffer, regardless of oob_required.
	 */
	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
	oob_bit_off = nfc_geo->metadata_size * 8;
	dst_bit_off = oob_bit_off;

	/* Interleave the payload data and the ECC bits. */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		if (buf)
			nand_extract_bits(tmp_buf, dst_bit_off, buf,
					  step * eccsize * 8, eccsize * 8);
		dst_bit_off += eccsize * 8;

		/* Pad the last ECC field out to a byte boundary. */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		if (oob_required)
			nand_extract_bits(tmp_buf, dst_bit_off, oob,
					  oob_bit_off, eccbits);

		dst_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	oob_byte_off = oob_bit_off / 8;

	/* Trailing OOB bytes past the interleaved region are copied verbatim. */
	if (oob_required && oob_byte_off < mtd->oobsize)
		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);

	/*
	 * Apply block-mark swapping last, once the buffer matches the
	 * on-flash layout (inverse of the swap done on the read path).
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	return nand_prog_page_op(chip, page, 0, tmp_buf,
				 mtd->writesize + mtd->oobsize);
}
1725
/* Raw OOB read: reuse the raw page-read path with a NULL data buffer. */
static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
}
1730
/* Raw OOB write: reuse the raw page-write path with a NULL data buffer. */
static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
}
1735
/*
 * Mark the block containing @ofs bad by writing a zero block mark.
 *
 * The mark goes to the first OOB byte (column == writesize), except on
 * i.MX23 where it lives at byte 0 of the data area instead.
 */
static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret = 0;
	uint8_t *block_mark;
	int column, page, chipnr;

	chipnr = (int)(ofs >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;

	/* Write a zero byte through the DMA-safe buffer. */
	block_mark = this->data_buffer_dma;
	block_mark[0] = 0;

	/* Shift to get the page of the first page in the block. */
	page = (int)(ofs >> chip->page_shift);

	ret = nand_prog_page_op(chip, page, column, block_mark, 1);

	nand_deselect_target(chip);

	return ret;
}
1762
/*
 * Set the boot-ROM search geometry used by the transcription code.
 *
 * The values are fixed for all supported SoCs: strides of 64 pages, and
 * a search area of 1 << 2 == 4 strides.
 */
static int nand_boot_set_geometry(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *geometry = &this->rom_geometry;

	/*
	 * Pages in a stride: the boot ROM scans the search area in strides
	 * of this many pages. Fixed at 64 here.
	 */
	geometry->stride_size_in_pages = 64;

	/*
	 * The search area contains 2^exponent strides; with an exponent of
	 * 2, the ROM probes 4 stride start pages for a fingerprint.
	 */
	geometry->search_area_stride_exponent = 2;
	return 0;
}
1788
/* NCB fingerprint written at byte offset 12 of each stride's first page. */
static const char *fingerprint = "STMP";

/*
 * Scan the boot-ROM search area for an NCB fingerprint.
 *
 * Returns true (nonzero) when the "STMP" marker is found at offset 12 in
 * the first page of any stride, meaning the transcription procedure has
 * already been performed on this chip.
 */
static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	unsigned int search_area_size_in_strides;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int found_an_ncb_fingerprint = false;
	int ret;

	/* Compute the number of strides in a search area. */
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;

	nand_select_target(chip, 0);

	/*
	 * Loop through the first search area, looking for the NCB
	 * fingerprint.
	 */
	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");

	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page address of the first page in this stride. */
		page = stride * rom_geo->stride_size_in_pages;

		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);

		/*
		 * Read just the fingerprint bytes; a failed read simply
		 * moves on to the next stride.
		 */
		ret = nand_read_page_op(chip, page, 12, buffer,
					strlen(fingerprint));
		if (ret)
			continue;

		/* Look for the fingerprint. */
		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}

	}

	nand_deselect_target(chip);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}
1843
1844
/*
 * Erase the boot-ROM search area and write NCB fingerprints into the
 * first page of every stride, marking the transcription as done.
 *
 * Erase/write failures are logged but not fatal: a missing fingerprint
 * only means the (idempotent) transcription runs again on next boot.
 */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	/* Round up: a partially covered block still has to be erased. */
	search_area_size_in_blocks =
		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);

	nand_select_target(chip, 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		status = nand_erase_op(chip, block);
		if (status)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer. */
	memset(buffer, ~0, mtd->writesize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page address of the first page in this stride. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);

		status = chip->ecc.write_page_raw(chip, buffer, 0, page);
		if (status)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	nand_deselect_target(chip);

	return 0;
}
1910
/*
 * i.MX23 boot-area initialization: transcribe factory bad-block marks.
 *
 * The MX23 boot ROM expects the bad-block mark in the data area, so the
 * factory marks (in the OOB) must be copied ("transcribed") there once.
 * A fingerprint stamp records that the work has already been done.
 */
static int mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_count;
	unsigned int block;
	int chipnr;
	int page;
	loff_t byte;
	uint8_t block_mark;
	int ret = 0;

	/*
	 * If a fingerprint stamp is already present, the marks were
	 * transcribed on an earlier boot and there is nothing to do.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * First boot on this chip: walk every block and move any factory
	 * bad-block mark from the OOB into the data area.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = nanddev_eraseblocks_per_target(&chip->base);

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * first page.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block << chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		nand_select_target(chip, chipnr);
		ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
					1);
		nand_deselect_target(chip);

		/* An unreadable page: skip it rather than abort the scan. */
		if (ret)
			continue;

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we look for marks going forward.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->legacy.block_markbad(chip, byte);
			if (ret)
				dev_err(dev,
					"Failed to mark block bad with ret %d\n",
					ret);
		}
	}

	/* Write the stamp so the transcription is not repeated. */
	mx23_write_transcription_stamp(this);
	return 0;
}
1983
/*
 * Prepare the boot-area geometry and, on i.MX23 only, run the bad-block
 * mark transcription procedure required by its boot ROM.
 */
static int nand_boot_init(struct gpmi_nand_data *this)
{
	nand_boot_set_geometry(this);

	/* Only the i.MX23 needs the transcription step. */
	if (!GPMI_IS_MX23(this))
		return 0;

	return mx23_boot_init(this);
}
1993
1994static int gpmi_set_geometry(struct gpmi_nand_data *this)
1995{
1996 int ret;
1997
1998
1999 gpmi_free_dma_buffer(this);
2000
2001
2002 ret = bch_set_geometry(this);
2003 if (ret) {
2004 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2005 return ret;
2006 }
2007
2008
2009 return gpmi_alloc_dma_buffer(this);
2010}
2011
/*
 * Final controller setup once the chip has been identified: compute the
 * BCH geometry, hook up the ECC page/OOB operations and, where possible,
 * enable subpage reads.
 */
static int gpmi_init_last(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	int ret;

	/* Set up the medium geometry (and matching DMA buffers). */
	ret = gpmi_set_geometry(this);
	if (ret)
		return ret;

	/* Init the nand_ecc_ctrl{} with our on-host BCH implementation. */
	ecc->read_page	= gpmi_ecc_read_page;
	ecc->write_page	= gpmi_ecc_write_page;
	ecc->read_oob	= gpmi_ecc_read_oob;
	ecc->write_oob	= gpmi_ecc_write_oob;
	ecc->read_page_raw = gpmi_ecc_read_page_raw;
	ecc->write_page_raw = gpmi_ecc_write_page_raw;
	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	ecc->size	= bch_geo->ecc_chunk_size;
	ecc->strength	= bch_geo->ecc_strength;
	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);

	/*
	 * Subpage (partial-page) reads are only offered on i.MX6 and only
	 * when each chunk's ECC field is a whole number of bytes, which is
	 * what gpmi_ecc_read_subpage() requires.
	 */
	if (GPMI_IS_MX6(this) &&
		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
		ecc->read_subpage = gpmi_ecc_read_subpage;
		chip->options |= NAND_SUBPAGE_READ;
	}

	return 0;
}
2052
/*
 * ->attach_chip hook: apply DT policy (flash-based BBT, optional
 * block-mark swap opt-out), then finish ECC/geometry setup.
 */
static int gpmi_nand_attach_chip(struct nand_chip *chip)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	int ret;

	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
		/* The BBT must not live in the OOB: that area is ECC data. */
		chip->bbt_options |= NAND_BBT_NO_OOB;

		if (of_property_read_bool(this->dev->of_node,
					  "fsl,no-blockmark-swap"))
			this->swap_block_mark = false;
	}
	dev_dbg(this->dev, "Blockmark swapping %sabled\n",
		this->swap_block_mark ? "en" : "dis");

	ret = gpmi_init_last(this);
	if (ret)
		return ret;

	/* The BBT is created explicitly later (see nand_create_bbt()). */
	chip->options |= NAND_SKIP_BBTSCAN;

	return 0;
}
2076
2077static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2078{
2079 struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2080
2081 this->ntransfers++;
2082
2083 if (this->ntransfers == GPMI_MAX_TRANSFERS)
2084 return NULL;
2085
2086 return transfer;
2087}
2088
/*
 * Chain a command cycle (plus up to @naddr address cycles) onto the DMA
 * channel: first a PIO descriptor programming GPMI_CTRL0 for a CLE write
 * with address increment, then a slave_sg descriptor carrying the
 * command/address bytes themselves.
 *
 * Returns the last descriptor of the chain, or NULL on failure.
 */
static struct dma_async_tx_descriptor *gpmi_chain_command(
	struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	int chip = this->nand.cur_cs;
	u32 pio[3];

	/* [1] send out the PIO words: one command byte + naddr address bytes */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
	pio[1] = 0;
	pio[2] = 0;
	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, 0);
	if (!desc)
		return NULL;

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	/* Build cmd + addr bytes contiguously in the transfer's bounce buffer. */
	transfer->cmdbuf[0] = cmd;
	if (naddr)
		memcpy(&transfer->cmdbuf[1], addr, naddr);

	/* NOTE(review): dma_map_sg() result is not checked here — confirm. */
	sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
	dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);

	transfer->direction = DMA_TO_DEVICE;

	desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
				       MXS_DMA_CTRL_WAIT4END);
	return desc;
}
2130
/*
 * Chain a wait-for-ready descriptor: a zero-length PIO transfer in
 * WAIT_FOR_READY mode, flagged WAIT4RDY so the DMA pauses until the
 * NAND's ready/busy line deasserts.
 */
static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
	struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	u32 pio[2];

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;

	return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
				      MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
}
2148
/*
 * Chain a data-in transfer of @raw_len bytes into @buf.
 *
 * With BCH active (this->bch), the PIO words also program the ECC engine
 * (decode mode, payload/auxiliary DMA addresses) and the BCH block moves
 * the data itself; otherwise a plain slave_sg read descriptor follows.
 * *@direct reports whether @buf was DMA-mapped directly — if not, the
 * caller must copy the result out of the bounce buffer afterwards.
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_read(
	struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_FROM_DEVICE;

	/* Map @buf directly when possible, else fall back to the bounce buffer. */
	*direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
				   DMA_FROM_DEVICE);

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	if (this->bch) {
		/* Program the BCH engine for on-the-fly decode. */
		pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
				| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, 0);
	if (!desc)
		return NULL;

	/* Without BCH, the data is moved by an ordinary slave_sg read. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
					       DMA_DEV_TO_MEM,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}
2195
/*
 * Chain a data-out transfer of @raw_len bytes from @buf — mirror of
 * gpmi_chain_data_read().
 *
 * With BCH active, the PIO words program the ECC engine for on-the-fly
 * encode and the BCH block consumes the data itself; otherwise a plain
 * slave_sg write descriptor follows the PIO setup.
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_write(
	struct gpmi_nand_data *this, const void *buf, int raw_len)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_TO_DEVICE;

	/* Map @buf directly when possible, else stage it in the bounce buffer. */
	prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	if (this->bch) {
		/* Program the BCH engine for on-the-fly encode. */
		pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
					BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE,
				      (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
	if (!desc)
		return NULL;

	/* Without BCH, the data is moved by an ordinary slave_sg write. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
					       DMA_MEM_TO_DEV,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}
2242
2243static int gpmi_nfc_exec_op(struct nand_chip *chip,
2244 const struct nand_operation *op,
2245 bool check_only)
2246{
2247 const struct nand_op_instr *instr;
2248 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2249 struct dma_async_tx_descriptor *desc = NULL;
2250 int i, ret, buf_len = 0, nbufs = 0;
2251 u8 cmd = 0;
2252 void *buf_read = NULL;
2253 const void *buf_write = NULL;
2254 bool direct = false;
2255 struct completion *completion;
2256 unsigned long to;
2257
2258 if (check_only)
2259 return 0;
2260
2261 this->ntransfers = 0;
2262 for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2263 this->transfers[i].direction = DMA_NONE;
2264
2265 ret = pm_runtime_get_sync(this->dev);
2266 if (ret < 0)
2267 return ret;
2268
2269
2270
2271
2272
2273
2274
2275 if (this->hw.must_apply_timings) {
2276 this->hw.must_apply_timings = false;
2277 gpmi_nfc_apply_timings(this);
2278 }
2279
2280 dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2281
2282 for (i = 0; i < op->ninstrs; i++) {
2283 instr = &op->instrs[i];
2284
2285 nand_op_trace(" ", instr);
2286
2287 switch (instr->type) {
2288 case NAND_OP_WAITRDY_INSTR:
2289 desc = gpmi_chain_wait_ready(this);
2290 break;
2291 case NAND_OP_CMD_INSTR:
2292 cmd = instr->ctx.cmd.opcode;
2293
2294
2295
2296
2297
2298 if (i + 1 != op->ninstrs &&
2299 op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2300 continue;
2301
2302 desc = gpmi_chain_command(this, cmd, NULL, 0);
2303
2304 break;
2305 case NAND_OP_ADDR_INSTR:
2306 desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2307 instr->ctx.addr.naddrs);
2308 break;
2309 case NAND_OP_DATA_OUT_INSTR:
2310 buf_write = instr->ctx.data.buf.out;
2311 buf_len = instr->ctx.data.len;
2312 nbufs++;
2313
2314 desc = gpmi_chain_data_write(this, buf_write, buf_len);
2315
2316 break;
2317 case NAND_OP_DATA_IN_INSTR:
2318 if (!instr->ctx.data.len)
2319 break;
2320 buf_read = instr->ctx.data.buf.in;
2321 buf_len = instr->ctx.data.len;
2322 nbufs++;
2323
2324 desc = gpmi_chain_data_read(this, buf_read, buf_len,
2325 &direct);
2326 break;
2327 }
2328
2329 if (!desc) {
2330 ret = -ENXIO;
2331 goto unmap;
2332 }
2333 }
2334
2335 dev_dbg(this->dev, "%s setup done\n", __func__);
2336
2337 if (nbufs > 1) {
2338 dev_err(this->dev, "Multiple data instructions not supported\n");
2339 ret = -EINVAL;
2340 goto unmap;
2341 }
2342
2343 if (this->bch) {
2344 writel(this->bch_flashlayout0,
2345 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2346 writel(this->bch_flashlayout1,
2347 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2348 }
2349
2350 if (this->bch && buf_read) {
2351 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2352 this->resources.bch_regs + HW_BCH_CTRL_SET);
2353 completion = &this->bch_done;
2354 } else {
2355 desc->callback = dma_irq_callback;
2356 desc->callback_param = this;
2357 completion = &this->dma_done;
2358 }
2359
2360 init_completion(completion);
2361
2362 dmaengine_submit(desc);
2363 dma_async_issue_pending(get_dma_chan(this));
2364
2365 to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
2366 if (!to) {
2367 dev_err(this->dev, "DMA timeout, last DMA\n");
2368 gpmi_dump_info(this);
2369 ret = -ETIMEDOUT;
2370 goto unmap;
2371 }
2372
2373 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2374 this->resources.bch_regs + HW_BCH_CTRL_CLR);
2375 gpmi_clear_bch(this);
2376
2377 ret = 0;
2378
2379unmap:
2380 for (i = 0; i < this->ntransfers; i++) {
2381 struct gpmi_transfer *transfer = &this->transfers[i];
2382
2383 if (transfer->direction != DMA_NONE)
2384 dma_unmap_sg(this->dev, &transfer->sgl, 1,
2385 transfer->direction);
2386 }
2387
2388 if (!ret && buf_read && !direct)
2389 memcpy(buf_read, this->data_buffer_dma,
2390 gpmi_raw_len_to_len(this, buf_len));
2391
2392 this->bch = false;
2393
2394 pm_runtime_mark_last_busy(this->dev);
2395 pm_runtime_put_autosuspend(this->dev);
2396
2397 return ret;
2398}
2399
/* Controller hooks handed to the NAND core. */
static const struct nand_controller_ops gpmi_nand_controller_ops = {
	.attach_chip = gpmi_nand_attach_chip,
	.setup_interface = gpmi_setup_interface,
	.exec_op = gpmi_nfc_exec_op,
};
2405
/*
 * Register the NAND chip with the MTD/rawnand core: set up the chip
 * structure, allocate initial DMA buffers, scan the chip, run the
 * boot-area initialization, build the BBT and register the MTD device.
 */
static int gpmi_nand_init(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* init the MTD data structures */
	mtd->name		= "gpmi-nand";
	mtd->dev.parent		= this->dev;

	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
	nand_set_controller_data(chip, this);
	nand_set_flash_node(chip, this->pdev->dev.of_node);
	chip->legacy.block_markbad = gpmi_block_markbad;
	chip->badblock_pattern	= &gpmi_bbt_descr;
	chip->options		|= NAND_NO_SUBPAGE_WRITE;

	/* Swap block mark everywhere except on the i.MX23. */
	this->swap_block_mark = !GPMI_IS_MX23(this);

	/*
	 * Allocate a temporary DMA buffer for reading the ID before the real
	 * BCH geometry (and hence the final buffer sizes) is known; see
	 * gpmi_init_last() / gpmi_set_geometry() for the reallocation.
	 */
	this->bch_geometry.payload_size = 1024;
	this->bch_geometry.auxiliary_size = 128;
	ret = gpmi_alloc_dma_buffer(this);
	if (ret)
		goto err_out;

	nand_controller_init(&this->base);
	this->base.ops = &gpmi_nand_controller_ops;
	chip->controller = &this->base;

	/* i.MX6 controllers drive up to two chip selects. */
	ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
	if (ret)
		goto err_out;

	ret = nand_boot_init(this);
	if (ret)
		goto err_nand_cleanup;
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nand_cleanup;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_nand_cleanup;
	return 0;

err_nand_cleanup:
	nand_cleanup(chip);
err_out:
	gpmi_free_dma_buffer(this);
	return ret;
}
2462
/* OF match table: one gpmi_devdata per supported SoC generation. */
static const struct of_device_id gpmi_nand_id_table[] = {
	{
		.compatible = "fsl,imx23-gpmi-nand",
		.data = &gpmi_devdata_imx23,
	}, {
		.compatible = "fsl,imx28-gpmi-nand",
		.data = &gpmi_devdata_imx28,
	}, {
		.compatible = "fsl,imx6q-gpmi-nand",
		.data = &gpmi_devdata_imx6q,
	}, {
		.compatible = "fsl,imx6sx-gpmi-nand",
		.data = &gpmi_devdata_imx6sx,
	}, {
		.compatible = "fsl,imx7d-gpmi-nand",
		.data = &gpmi_devdata_imx7d,
	}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2482
/*
 * Platform probe: acquire resources and clocks, enable runtime PM,
 * initialize the controller hardware and register the NAND chip.
 */
static int gpmi_nand_probe(struct platform_device *pdev)
{
	struct gpmi_nand_data *this;
	const struct of_device_id *of_id;
	int ret;

	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
	if (!this)
		return -ENOMEM;

	/* Pick the per-SoC quirk data from the OF match. */
	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
	if (of_id) {
		this->devdata = of_id->data;
	} else {
		dev_err(&pdev->dev, "Failed to find the right device id.\n");
		return -ENODEV;
	}

	platform_set_drvdata(pdev, this);
	this->pdev  = pdev;
	this->dev   = &pdev->dev;

	ret = acquire_resources(this);
	if (ret)
		goto exit_acquire_resources;

	/* Clocks must run before runtime PM is marked active below. */
	ret = __gpmi_enable_clk(this, true);
	if (ret)
		goto exit_acquire_resources;

	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = gpmi_init(this);
	if (ret)
		goto exit_nfc_init;

	ret = gpmi_nand_init(this);
	if (ret)
		goto exit_nfc_init;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(this->dev, "driver registered.\n");

	return 0;

exit_nfc_init:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	release_resources(this);
exit_acquire_resources:

	return ret;
}
2542
/* Platform remove: tear down in reverse order of probe. */
static int gpmi_nand_remove(struct platform_device *pdev)
{
	struct gpmi_nand_data *this = platform_get_drvdata(pdev);
	struct nand_chip *chip = &this->nand;
	int ret;

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	gpmi_free_dma_buffer(this);
	release_resources(this);
	return 0;
}
2559
2560#ifdef CONFIG_PM_SLEEP
/*
 * System sleep: release the DMA channels; they are re-acquired and the
 * controller re-initialized in gpmi_pm_resume().
 */
static int gpmi_pm_suspend(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	release_dma_channels(this);
	return 0;
}
2568
/*
 * System resume: re-acquire the DMA channels and reprogram the
 * controller and BCH engine, whose register state was lost in suspend.
 */
static int gpmi_pm_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);
	int ret;

	ret = acquire_dma_channels(this);
	if (ret < 0)
		return ret;

	/* re-init the GPMI registers */
	ret = gpmi_init(this);
	if (ret) {
		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
		return ret;
	}

	/* Set flag to get timing setup restored for next exec_op. */
	if (this->hw.clk_rate)
		this->hw.must_apply_timings = true;

	/* re-init the BCH registers */
	ret = bch_set_geometry(this);
	if (ret) {
		dev_err(this->dev, "Error setting BCH : %d\n", ret);
		return ret;
	}

	return 0;
}
2598#endif
2599
2600static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2601{
2602 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2603
2604 return __gpmi_enable_clk(this, false);
2605}
2606
2607static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2608{
2609 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2610
2611 return __gpmi_enable_clk(this, true);
2612}
2613
/* System sleep goes through gpmi_pm_*; runtime PM only gates the clocks. */
static const struct dev_pm_ops gpmi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
	SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
};
2618
/* Platform driver glue. */
static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		.pm = &gpmi_pm_ops,
		.of_match_table = gpmi_nand_id_table,
	},
	.probe   = gpmi_nand_probe,
	.remove  = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);
2629
2630MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2631MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2632MODULE_LICENSE("GPL");
2633