// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale GPMI NAND Flash Driver
 *
 * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma/mxs-dma.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"

/* Resource names for the GPMI NAND driver. */
#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME	"gpmi-nand"
#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME	"bch"
#define GPMI_NAND_BCH_INTERRUPT_RES_NAME	"bch"

/* Converts a duration into clock cycles, rounding up. */
#define TO_CYCLES(duration, period)	DIV_ROUND_UP_ULL(duration, period)

/* MXS register SET/CLR alias offsets. */
#define MXS_SET_ADDR		0x4
#define MXS_CLR_ADDR		0x8

/*
 * Clear the given bit(s) and poll until the hardware reports them clear.
 * This is used on a block's control register with a mask of either
 * SFTRST (bit 31) or CLKGATE (bit 30).  Returns non-zero on timeout.
 */
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
	int timeout = 0x400;

	/* clear the bit */
	writel(mask, addr + MXS_CLR_ADDR);

	/*
	 * SFTRST needs a few clocks to settle; the reference manual
	 * recommends waiting 1us before polling the bit.
	 */
	udelay(1);

	/* poll the bit becoming clear */
	while ((readl(addr) & mask) && --timeout)
		;

	return !timeout;
}
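
/*
 * Note on the register convention used above and below: the MXS-style
 * register blocks expose "set" and "clear" aliases of each register at
 * offsets +0x4 and +0x8, so individual bits can be set or cleared without
 * a read-modify-write.  clear_poll_bit() and gpmi_reset_block() rely on
 * this for the SFTRST/CLKGATE handshake.
 */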

#define MODULE_CLKGATE	(1 << 30)
#define MODULE_SFTRST	(1 << 31)

/*
 * Reset a GPMI/BCH block.
 *
 * A full reset both enables and soft-resets the module.  That is fine for
 * the GPMI block, but soft-resetting the BCH block on the MXS
 * (i.MX23/i.MX28) parts can leave it unusable until the next hard reset,
 * e.g. when the boot ROM has already initialized it in NAND boot mode.
 * The @just_enable parameter therefore lets callers only ungate the clock
 * and release the reset without asserting a new soft reset.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* poll CLKGATE becoming set */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}

static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
{
	struct clk *clk;
	int ret;
	int i;

	for (i = 0; i < GPMI_CLK_MAX; i++) {
		clk = this->resources.clock[i];
		if (!clk)
			break;

		if (v) {
			ret = clk_prepare_enable(clk);
			if (ret)
				goto err_clk;
		} else {
			clk_disable_unprepare(clk);
		}
	}
	return 0;

err_clk:
	for (; i > 0; i--)
		clk_disable_unprepare(this->resources.clock[i - 1]);
	return ret;
}

static int gpmi_init(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	int ret;

	ret = pm_runtime_get_sync(this->dev);
	if (ret < 0)
		return ret;

	ret = gpmi_reset_block(r->gpmi_regs, false);
	if (ret)
		goto err_out;

	/*
	 * Reset the BCH block, too.  On the MXS (i.MX23/i.MX28) parts the
	 * block is only enabled, not soft reset (see gpmi_reset_block()).
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
	if (ret)
		goto err_out;

	/* Choose NAND mode. */
	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Set the IRQ polarity. */
	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
	       r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Disable write protection. */
	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Select BCH ECC. */
	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * Decouple the chip select from the DMA channel; DMA channel 0 is
	 * used for all chips (see get_dma_chan()).
	 */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);

err_out:
	pm_runtime_mark_last_busy(this->dev);
	pm_runtime_put_autosuspend(this->dev);
	return ret;
}

/* Dump the BCH geometry and the GPMI/BCH register contents for debugging. */
static void gpmi_dump_info(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct bch_geometry *geo = &this->bch_geometry;
	u32 reg;
	int i;

	dev_err(this->dev, "Show GPMI registers :\n");
	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
		reg = readl(r->gpmi_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}

	/* start to print out the BCH info */
	dev_err(this->dev, "Show BCH registers :\n");
	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
		reg = readl(r->bch_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}
	dev_err(this->dev, "BCH Geometry :\n"
		"GF length              : %u\n"
		"ECC Strength           : %u\n"
		"Page Size in Bytes     : %u\n"
		"Metadata Size in Bytes : %u\n"
		"ECC Chunk Size in Bytes: %u\n"
		"ECC Chunk Count        : %u\n"
		"Payload Size in Bytes  : %u\n"
		"Auxiliary Size in Bytes: %u\n"
		"Auxiliary Status Offset: %u\n"
		"Block Mark Byte Offset : %u\n"
		"Block Mark Bit Offset  : %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->page_size,
		geo->metadata_size,
		geo->ecc_chunk_size,
		geo->ecc_chunk_count,
		geo->payload_size,
		geo->auxiliary_size,
		geo->auxiliary_status_offset,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}

static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;

	/* Do the sanity check. */
	if (GPMI_IS_MXS(this)) {
		/* The i.MX23/i.MX28 BCH only supports GF(2^13). */
		if (geo->gf_len == 14)
			return false;
	}
	return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
}

/*
 * If the NAND chip reports its ECC requirement (strength and step size),
 * derive the BCH geometry directly from it instead of computing our own
 * layout.  This may leave some OOB space unused.
 */
static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
				    unsigned int ecc_strength,
				    unsigned int ecc_step)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		dev_err(this->dev,
			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
			chip->base.eccreq.strength,
			chip->base.eccreq.step_size);
		return -EINVAL;
	}
	geo->ecc_chunk_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);
	if (!gpmi_check_ecc(this))
		return -EINVAL;

	/* Keep the ECC chunk size at least as large as the OOB size. */
	if (geo->ecc_chunk_size < mtd->oobsize) {
		dev_err(this->dev,
			"unsupported nand chip. ecc size: %d, oob size : %d\n",
			ecc_step, mtd->oobsize);
		return -EINVAL;
	}

	/* The default metadata size, see legacy_set_geometry(). */
	geo->metadata_size = 10;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/*
	 * The physical page as seen by the BCH engine is laid out as the
	 * metadata bytes followed by each data chunk immediately followed
	 * by its parity (gf_len * ecc_strength bits per chunk), so the
	 * total page size is the data area plus the metadata plus the
	 * parity of all chunks.
	 */
	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;

	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer holds the metadata followed by one status
	 * byte per chunk, both padded to a 32-bit boundary.
	 */
	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
			      + ALIGN(geo->ecc_chunk_count, 4);

	if (!this->swap_block_mark)
		return 0;

	/*
	 * Compute where the factory bad block mark (the first OOB byte of
	 * the physical page) ends up inside the ECC-protected data area,
	 * so reads and writes can swap it with the metadata.
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;
	return 0;
}
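
/*
 * Worked example (hypothetical chip, not taken from a real datasheet):
 * a 4096-byte page with 224 bytes of OOB, 512-byte ECC steps (gf_len 13)
 * and a rounded-up strength of 16 gives:
 *
 *	ecc_chunk_count = 4096 / 512                 = 8
 *	parity per page = 13 * 16 * 8 / 8            = 208 bytes
 *	page_size       = 4096 + 10 + 208            = 4314 bytes
 *	auxiliary_size  = ALIGN(10, 4) + ALIGN(8, 4) = 12 + 8 = 20 bytes
 *	block_mark_bit_offset = 4096 * 8 - (16 * 13 * 7 + 10 * 8) = 31232
 *	                      -> byte 3904, bit 0 of the data area
 *
 * i.e. the factory bad block marker at physical byte offset 4096 coincides
 * with logical data byte 3904 once the metadata and the per-chunk parity
 * are interleaved.
 */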

/*
 * Calculate the maximum ECC strength that fits in the spare area:
 *
 *	E : ECC strength             G : Galois field length (gf_len)
 *	N : ECC chunks per page      O : oobsize
 *	M : metadata size per page
 *
 * The parity of all chunks must fit into the spare bytes:
 *
 *	E * G * N / 8 <= O - M    =>    E = ((O - M) * 8) / (G * N)
 *
 * rounded down to an even number, since the BCH layout registers encode
 * the strength in steps of two.
 */
static inline int get_ecc_strength(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	int ecc_strength;

	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);

	/* We need the largest even number not exceeding the result. */
	return round_down(ecc_strength, 2);
}
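
/*
 * Example with the hypothetical 4096+224 byte page used above and
 * 512-byte chunks (G = 13, N = 8, M = 10):
 *
 *	E = ((224 - 10) * 8) / (13 * 8) = 1712 / 104 = 16 (already even)
 *
 * so the legacy geometry would pick an ECC strength of 16 bits per chunk.
 */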

static int legacy_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(&this->nand);
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The metadata size could be changed, but 10 bytes is used here;
	 * it must stay small enough to leave room for the ECC parity.
	 */
	geo->metadata_size = 10;

	/* The default length of the Galois field. */
	geo->gf_len = 13;

	/*
	 * The default chunk size.  Grow it (and switch to GF(2^14)) until
	 * the chunk is at least as large as the OOB area.
	 */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize) {
		geo->ecc_chunk_size *= 2; /* keep C >= O */
		geo->gf_len = 14;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!gpmi_check_ecc(this)) {
		dev_err(this->dev,
			"ecc strength: %d cannot be supported by the controller (%d)\n"
			"try to use minimum ecc strength that NAND chip required\n",
			geo->ecc_strength,
			this->devdata->bch_max_ecc_strength);
		return -EINVAL;
	}

	geo->page_size = mtd->writesize + geo->metadata_size +
		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status.
	 * The metadata is padded to the nearest 32-bit boundary.  The ECC
	 * status contains one byte for every ECC chunk and is also padded
	 * to the nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	if (!this->swap_block_mark)
		return 0;

	/*
	 * Compute the byte and bit offsets of the factory block mark within
	 * the ECC-based (interleaved) view of the page.  The mark sits at
	 * physical byte mtd->writesize; once the metadata and the parity of
	 * all chunks but the last one are accounted for, that physical
	 * position maps back to an earlier offset in the logical data.
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;
	return 0;
}

static int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;

	if (chip->ecc.strength > 0 && chip->ecc.size > 0)
		return set_geometry_by_ecc_info(this, chip->ecc.strength,
						chip->ecc.size);

	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
	    || legacy_set_geometry(this)) {
		if (!(chip->base.eccreq.strength > 0 &&
		      chip->base.eccreq.step_size > 0))
			return -EINVAL;

		return set_geometry_by_ecc_info(this,
						chip->base.eccreq.strength,
						chip->base.eccreq.step_size);
	}

	return 0;
}
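
/*
 * Geometry selection order, as implemented above: an explicit ECC
 * configuration already present in chip->ecc (typically from the
 * nand-ecc-strength / nand-ecc-step-size DT properties parsed by the NAND
 * core) wins; otherwise the legacy layout is computed from the page and
 * OOB sizes, unless "fsl,use-minimum-ecc" is set or the legacy computation
 * fails, in which case the chip's own reported ECC requirement is used.
 */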

/* Configures the geometry for BCH. */
static int bch_set_geometry(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	int ret;

	ret = common_nfc_set_geometry(this);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(this->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(this->dev);
		return ret;
	}

	/*
	 * Do not assert a new soft reset of the BCH block on the MXS
	 * (i.MX23/i.MX28) parts: a soft reset can leave the block unusable
	 * there, so it is only enabled (see gpmi_reset_block()).
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
	if (ret)
		goto err_out;

	/* Set *all* chip selects to use layout 0. */
	writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);

	ret = 0;
err_out:
	pm_runtime_mark_last_busy(this->dev);
	pm_runtime_put_autosuspend(this->dev);

	return ret;
}

/*
 * gpmi_nfc_compute_timings() translates the SDR timings requested by the
 * NAND core into a GPMI clock rate and register settings:
 *
 *  - the GPMI clock is chosen from the minimum read cycle time (tRC):
 *    non-EDO chips run at 22 MHz, EDO-capable chips at 80 or 100 MHz;
 *  - address setup, data setup and data hold are converted from
 *    picoseconds into whole GPMI clock cycles for HW_GPMI_TIMING0;
 *  - the busy timeout (tWB + tR) is programmed into HW_GPMI_TIMING1,
 *    whose DEVICE_BUSY_TIMEOUT field counts in units of 4096 GPMI cycles;
 *  - for EDO modes the read data are sampled with a delayed strobe: the
 *    delay factor is derived from tREA and the data setup time and is
 *    expressed relative to the GPMI period, or to half the period when
 *    the period exceeds the SoC's DLL threshold (HALF_PERIOD mode), and
 *    it requires the DLL to be enabled in HW_GPMI_CTRL1.
 *
 * The computed values are cached in this->hw and written to the
 * controller by gpmi_nfc_apply_timings() right before the next operation.
 */
static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
				     const struct nand_sdr_timings *sdr)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
	unsigned int period_ps, reference_period_ps;
	unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
	unsigned int tRP_ps;
	bool use_half_period;
	int sample_delay_ps, sample_delay_factor;
	u16 busy_timeout_cycles;
	u8 wrn_dly_sel;

	if (sdr->tRC_min >= 30000) {
		/* ONFI non-EDO modes [0-3] */
		hw->clk_rate = 22000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
	} else if (sdr->tRC_min >= 25000) {
		/* ONFI EDO mode 4 */
		hw->clk_rate = 80000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
	} else {
		/* ONFI EDO mode 5 */
		hw->clk_rate = 100000000;
		wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
	}

	/* SDR core timings are given in picoseconds */
	period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);

	addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
	data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
	data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
	busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);

	hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
		      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
		      BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
	/*
	 * The DEVICE_BUSY_TIMEOUT field counts in units of 4096 GPMI clock
	 * cycles, so the cycle count must be divided (rounding up), not
	 * multiplied, when programming it.
	 */
	hw->timing1 =
		BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles,
							  4096));

	/*
	 * Derive the ideal read strobe delay:
	 *
	 *                     (tREA + 4000 - tRP) * 8
	 *         RDN_DELAY = -----------------------
	 *                                RP
	 *
	 * where RP is the GPMI period, or half of it when the period is
	 * larger than the DLL threshold of this SoC (HALF_PERIOD mode).
	 */
	if (period_ps > dll_threshold_ps) {
		use_half_period = true;
		reference_period_ps = period_ps / 2;
	} else {
		use_half_period = false;
		reference_period_ps = period_ps;
	}

	tRP_ps = data_setup_cycles * period_ps;
	sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
	if (sample_delay_ps > 0)
		sample_delay_factor = sample_delay_ps / reference_period_ps;
	else
		sample_delay_factor = 0;

	hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
	if (sample_delay_factor)
		hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
			      BM_GPMI_CTRL1_DLL_ENABLE |
			      (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
}
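
/*
 * Example (hypothetical EDO mode 5 chip): with hw->clk_rate = 100 MHz the
 * GPMI period is 10000 ps, so tDS_min = 10000 ps gives one data setup
 * cycle, and a busy timeout of tWB + tR = 100 us gives
 * TO_CYCLES(100000000, 10000) = 10000 cycles, i.e. a DEVICE_BUSY_TIMEOUT
 * value of DIV_ROUND_UP(10000, 4096) = 3.
 */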

static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int dll_wait_time_us;

	clk_set_rate(r->clock[0], hw->clk_rate);

	writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
	writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);

	/*
	 * Clear several CTRL1 fields; the DLL must be disabled while
	 * RDN_DELAY or HALF_PERIOD are being changed.
	 */
	writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Wait 64 clock cycles before using the GPMI after enabling the DLL */
	dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
	if (!dll_wait_time_us)
		dll_wait_time_us = 1;

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_us);
}
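
/*
 * Note on the DLL settle delay above: USEC_PER_SEC / clk_rate * 64 is
 * integer arithmetic, so for clock rates above 1 MHz it evaluates to 0
 * (e.g. 1000000 / 100000000 * 64 = 0) and the following check raises the
 * delay to the 1 us minimum; at 100 MHz that is comfortably more than the
 * 64 GPMI cycles (640 ns) the DLL needs.
 */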

static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
				     const struct nand_data_interface *conf)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	const struct nand_sdr_timings *sdr;

	/* Retrieve the required NAND timings */
	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* Only the MX6 GPMI controller can reach EDO timings */
	if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
		return -ENOTSUPP;

	/* Stop here if this call was just a check */
	if (chipnr < 0)
		return 0;

	/* Do the actual derivation of the controller timings */
	gpmi_nfc_compute_timings(this, sdr);

	this->hw.must_apply_timings = true;

	return 0;
}

/* Clear the pending BCH completion interrupt. */
static void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;

	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}

static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
{
	/* We use DMA channel 0 to access all the nand chips. */
	return this->dma_chans[0];
}

/* This will be called after the DMA operation is finished. */
static void dma_irq_callback(void *param)
{
	struct gpmi_nand_data *this = param;
	struct completion *dma_c = &this->dma_done;

	complete(dma_c);
}

static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	gpmi_clear_bch(this);
	complete(&this->bch_done);
	return IRQ_HANDLED;
}

static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
{
	/*
	 * raw_len is the length to read/write including the BCH data, as
	 * passed in by exec_op.  Calculate the plain data length from it.
	 */
	if (this->bch)
		return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
	else
		return raw_len;
}
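
/*
 * Example with the hypothetical geometry used earlier (512-byte chunks,
 * page_size 4314): a BCH page read passes raw_len = 4314 down from
 * exec_op, and ALIGN_DOWN(4314, 512) = 4096 is the decoded payload length
 * the BCH engine writes to the data buffer (the metadata and per-chunk
 * status go to the auxiliary buffer instead).
 */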

/*
 * Map the caller's buffer for DMA if possible.  Returns true when the
 * buffer was mapped directly, false when the driver's internal bounce
 * buffer (data_buffer_dma) had to be used instead.
 */
static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
			     int raw_len, struct scatterlist *sgl,
			     enum dma_data_direction dr)
{
	int ret;
	int len = gpmi_raw_len_to_len(this, raw_len);

	/* first try to map the upper buffer directly */
	if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
		sg_init_one(sgl, buf, len);
		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			goto map_fail;

		return true;
	}

map_fail:
	/* We have to use our own DMA buffer. */
	sg_init_one(sgl, this->data_buffer_dma, len);

	if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
		memcpy(this->data_buffer_dma, buf, len);

	dma_map_sg(this->dev, sgl, 1, dr);

	return false;
}

/*
 * Bad block marker descriptor: the marker is a single byte at OOB offset
 * 0; any value other than 0xff marks the block bad.
 */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options	= 0,
	.offs		= 0,
	.len		= 1,
	.pattern	= scan_ff_pattern
};
847
848
849
850
851
852static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
853 struct mtd_oob_region *oobregion)
854{
855 struct nand_chip *chip = mtd_to_nand(mtd);
856 struct gpmi_nand_data *this = nand_get_controller_data(chip);
857 struct bch_geometry *geo = &this->bch_geometry;
858
859 if (section)
860 return -ERANGE;
861
862 oobregion->offset = 0;
863 oobregion->length = geo->page_size - mtd->writesize;
864
865 return 0;
866}
867
868static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
869 struct mtd_oob_region *oobregion)
870{
871 struct nand_chip *chip = mtd_to_nand(mtd);
872 struct gpmi_nand_data *this = nand_get_controller_data(chip);
873 struct bch_geometry *geo = &this->bch_geometry;
874
875 if (section)
876 return -ERANGE;
877
878
879 if (geo->page_size < mtd->writesize + mtd->oobsize) {
880 oobregion->offset = geo->page_size - mtd->writesize;
881 oobregion->length = mtd->oobsize - oobregion->offset;
882 }
883
884 return 0;
885}
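
/*
 * With the example geometry above (page_size 4314 for a 4096+224 page),
 * the "ECC" OOB region reported to MTD covers spare bytes 0..217
 * (4314 - 4096 = 218 bytes of metadata and parity spill-over) and the
 * free region is the remaining bytes 218..223.
 */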
886
887static const char * const gpmi_clks_for_mx2x[] = {
888 "gpmi_io",
889};
890
891static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
892 .ecc = gpmi_ooblayout_ecc,
893 .free = gpmi_ooblayout_free,
894};
895
896static const struct gpmi_devdata gpmi_devdata_imx23 = {
897 .type = IS_MX23,
898 .bch_max_ecc_strength = 20,
899 .max_chain_delay = 16000,
900 .clks = gpmi_clks_for_mx2x,
901 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
902};
903
904static const struct gpmi_devdata gpmi_devdata_imx28 = {
905 .type = IS_MX28,
906 .bch_max_ecc_strength = 20,
907 .max_chain_delay = 16000,
908 .clks = gpmi_clks_for_mx2x,
909 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
910};
911
912static const char * const gpmi_clks_for_mx6[] = {
913 "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
914};
915
916static const struct gpmi_devdata gpmi_devdata_imx6q = {
917 .type = IS_MX6Q,
918 .bch_max_ecc_strength = 40,
919 .max_chain_delay = 12000,
920 .clks = gpmi_clks_for_mx6,
921 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
922};
923
924static const struct gpmi_devdata gpmi_devdata_imx6sx = {
925 .type = IS_MX6SX,
926 .bch_max_ecc_strength = 62,
927 .max_chain_delay = 12000,
928 .clks = gpmi_clks_for_mx6,
929 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
930};
931
932static const char * const gpmi_clks_for_mx7d[] = {
933 "gpmi_io", "gpmi_bch_apb",
934};
935
936static const struct gpmi_devdata gpmi_devdata_imx7d = {
937 .type = IS_MX7D,
938 .bch_max_ecc_strength = 62,
939 .max_chain_delay = 12000,
940 .clks = gpmi_clks_for_mx7d,
941 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
942};
943
944static int acquire_register_block(struct gpmi_nand_data *this,
945 const char *res_name)
946{
947 struct platform_device *pdev = this->pdev;
948 struct resources *res = &this->resources;
949 struct resource *r;
950 void __iomem *p;
951
952 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
953 p = devm_ioremap_resource(&pdev->dev, r);
954 if (IS_ERR(p))
955 return PTR_ERR(p);
956
957 if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
958 res->gpmi_regs = p;
959 else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
960 res->bch_regs = p;
961 else
962 dev_err(this->dev, "unknown resource name : %s\n", res_name);
963
964 return 0;
965}
966
967static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
968{
969 struct platform_device *pdev = this->pdev;
970 const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
971 struct resource *r;
972 int err;
973
974 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
975 if (!r) {
976 dev_err(this->dev, "Can't get resource for %s\n", res_name);
977 return -ENODEV;
978 }
979
980 err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
981 if (err)
982 dev_err(this->dev, "error requesting BCH IRQ\n");
983
984 return err;
985}
986
987static void release_dma_channels(struct gpmi_nand_data *this)
988{
989 unsigned int i;
990 for (i = 0; i < DMA_CHANS; i++)
991 if (this->dma_chans[i]) {
992 dma_release_channel(this->dma_chans[i]);
993 this->dma_chans[i] = NULL;
994 }
995}
996
997static int acquire_dma_channels(struct gpmi_nand_data *this)
998{
999 struct platform_device *pdev = this->pdev;
1000 struct dma_chan *dma_chan;
1001 int ret = 0;
1002
1003
1004 dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
1005 if (IS_ERR(dma_chan)) {
1006 ret = PTR_ERR(dma_chan);
1007 if (ret != -EPROBE_DEFER)
1008 dev_err(this->dev, "DMA channel request failed: %d\n",
1009 ret);
1010 release_dma_channels(this);
1011 } else {
1012 this->dma_chans[0] = dma_chan;
1013 }
1014
1015 return ret;
1016}
1017
1018static int gpmi_get_clks(struct gpmi_nand_data *this)
1019{
1020 struct resources *r = &this->resources;
1021 struct clk *clk;
1022 int err, i;
1023
1024 for (i = 0; i < this->devdata->clks_count; i++) {
1025 clk = devm_clk_get(this->dev, this->devdata->clks[i]);
1026 if (IS_ERR(clk)) {
1027 err = PTR_ERR(clk);
1028 goto err_clock;
1029 }
1030
1031 r->clock[i] = clk;
1032 }
1033
1034 if (GPMI_IS_MX6(this))
1035
1036
1037
1038
1039
1040
1041 clk_set_rate(r->clock[0], 22000000);
1042
1043 return 0;
1044
1045err_clock:
1046 dev_dbg(this->dev, "failed in finding the clocks.\n");
1047 return err;
1048}
1049
1050static int acquire_resources(struct gpmi_nand_data *this)
1051{
1052 int ret;
1053
1054 ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
1055 if (ret)
1056 goto exit_regs;
1057
1058 ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
1059 if (ret)
1060 goto exit_regs;
1061
1062 ret = acquire_bch_irq(this, bch_irq);
1063 if (ret)
1064 goto exit_regs;
1065
1066 ret = acquire_dma_channels(this);
1067 if (ret)
1068 goto exit_regs;
1069
1070 ret = gpmi_get_clks(this);
1071 if (ret)
1072 goto exit_clock;
1073 return 0;
1074
1075exit_clock:
1076 release_dma_channels(this);
1077exit_regs:
1078 return ret;
1079}
1080
1081static void release_resources(struct gpmi_nand_data *this)
1082{
1083 release_dma_channels(this);
1084}
1085
1086static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
1087{
1088 struct device *dev = this->dev;
1089 struct bch_geometry *geo = &this->bch_geometry;
1090
1091 if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
1092 dma_free_coherent(dev, geo->auxiliary_size,
1093 this->auxiliary_virt,
1094 this->auxiliary_phys);
1095 kfree(this->data_buffer_dma);
1096 kfree(this->raw_buffer);
1097
1098 this->data_buffer_dma = NULL;
1099 this->raw_buffer = NULL;
1100}
1101
1102
1103static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
1104{
1105 struct bch_geometry *geo = &this->bch_geometry;
1106 struct device *dev = this->dev;
1107 struct mtd_info *mtd = nand_to_mtd(&this->nand);

	/*
	 * Allocate the read/write data bounce buffer.  This function can be
	 * called twice: before NAND identification mtd->writesize is still 0
	 * and a PAGE_SIZE buffer is used, afterwards the real page size is
	 * known and used instead.
	 */
1117 this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
1118 GFP_DMA | GFP_KERNEL);
1119 if (this->data_buffer_dma == NULL)
1120 goto error_alloc;
1121
1122 this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
1123 &this->auxiliary_phys, GFP_DMA);
1124 if (!this->auxiliary_virt)
1125 goto error_alloc;
1126
1127 this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
1128 if (!this->raw_buffer)
1129 goto error_alloc;
1130
1131 return 0;
1132
1133error_alloc:
1134 gpmi_free_dma_buffer(this);
1135 return -ENOMEM;
1136}

/*
 * Handles block mark swapping.  It is used both for swapping the mark out
 * of the data area and for swapping it back, because the two operations
 * are identical.
 */
static void block_mark_swapping(struct gpmi_nand_data *this,
				void *payload, void *auxiliary)
{
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	unsigned char *p;
	unsigned char *a;
	unsigned int bit;
	unsigned char mask;
	unsigned char from_data;
	unsigned char from_oob;

	if (!this->swap_block_mark)
		return;

	/* If control arrives here, we're swapping.  Set up some pointers. */
	bit = nfc_geo->block_mark_bit_offset;
	p = payload + nfc_geo->block_mark_byte_offset;
	a = auxiliary;

	/*
	 * Get the byte from the data area that overlays the block mark.
	 * Since the ECC engine applies its own view to the bits in the page,
	 * the physical block mark does not, in general, appear on a byte
	 * boundary in the data.
	 */
	from_data = (p[0] >> bit) | (p[1] << (8 - bit));

	/* Get the byte from the OOB (metadata). */
	from_oob = a[0];

	/* Swap them. */
	a[0] = from_data;

	mask = (0x1 << bit) - 1;
	p[0] = (p[0] & mask) | (from_oob << bit);

	mask = ~0 << bit;
	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
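
/*
 * Example of the bit arithmetic above, for block_mark_bit_offset = 2: the
 * byte that physically overlaps the marker straddles two payload bytes,
 * so it is reassembled as (p[0] >> 2) | (p[1] << 6); writing the swapped
 * OOB byte back places its low six bits in the top of p[0] and its top
 * two bits in the bottom of p[1].  When the offset is 0 the marker is
 * byte aligned and p[1] is left untouched.
 */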
1185
1186static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
1187 int last, int meta)
1188{
1189 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1190 struct bch_geometry *nfc_geo = &this->bch_geometry;
1191 struct mtd_info *mtd = nand_to_mtd(chip);
1192 int i;
1193 unsigned char *status;
1194 unsigned int max_bitflips = 0;
1195
1196
1197 status = this->auxiliary_virt + ALIGN(meta, 4);
1198
1199 for (i = first; i < last; i++, status++) {
1200 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1201 continue;
1202
1203 if (*status == STATUS_UNCORRECTABLE) {
1204 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1205 u8 *eccbuf = this->raw_buffer;
1206 int offset, bitoffset;
1207 int eccbytes;
1208 int flips;
1209
1210
1211 offset = nfc_geo->metadata_size * 8;
1212 offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
1213 offset -= eccbits;
1214 bitoffset = offset % 8;
1215 eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1216 offset /= 8;
1217 eccbytes -= offset;
1218 nand_change_read_column_op(chip, offset, eccbuf,
1219 eccbytes, false);

			/*
			 * The ECC bits are not byte aligned, so the first and
			 * last byte of eccbuf may contain in-band data.  Force
			 * the non-ECC bits to one so that
			 * nand_check_erased_ecc_chunk() does not count them
			 * as bitflips.
			 */
			if (bitoffset)
				eccbuf[0] |= GENMASK(bitoffset - 1, 0);

			bitoffset = (bitoffset + eccbits) % 8;
			if (bitoffset)
				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);

			/*
			 * The hardware flags an erased page that contains a
			 * few bitflips as uncorrectable, because nothing was
			 * ever written to it.  Treat the chunk as possibly
			 * erased and let nand_check_erased_ecc_chunk() fix up
			 * to ecc_strength flipped bits; only if that also
			 * fails is it counted as a real ECC failure.
			 */
1246 if (i == 0) {
1247
1248 flips = nand_check_erased_ecc_chunk(
1249 buf + i * nfc_geo->ecc_chunk_size,
1250 nfc_geo->ecc_chunk_size,
1251 eccbuf, eccbytes,
1252 this->auxiliary_virt,
1253 nfc_geo->metadata_size,
1254 nfc_geo->ecc_strength);
1255 } else {
1256 flips = nand_check_erased_ecc_chunk(
1257 buf + i * nfc_geo->ecc_chunk_size,
1258 nfc_geo->ecc_chunk_size,
1259 eccbuf, eccbytes,
1260 NULL, 0,
1261 nfc_geo->ecc_strength);
1262 }
1263
1264 if (flips > 0) {
1265 max_bitflips = max_t(unsigned int, max_bitflips,
1266 flips);
1267 mtd->ecc_stats.corrected += flips;
1268 continue;
1269 }
1270
1271 mtd->ecc_stats.failed++;
1272 continue;
1273 }
1274
1275 mtd->ecc_stats.corrected += *status;
1276 max_bitflips = max_t(unsigned int, max_bitflips, *status);
1277 }
1278
1279 return max_bitflips;
1280}
1281
1282static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1283{
1284 struct bch_geometry *geo = &this->bch_geometry;
1285 unsigned int ecc_strength = geo->ecc_strength >> 1;
1286 unsigned int gf_len = geo->gf_len;
1287 unsigned int block_size = geo->ecc_chunk_size;
1288
1289 this->bch_flashlayout0 =
1290 BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1291 BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1292 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1293 BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1294 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
1295
1296 this->bch_flashlayout1 =
1297 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1298 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1299 BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1300 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
1301}
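
/*
 * Note that both layout words above are programmed with
 * geo->ecc_strength >> 1: the ECC0/ECCN fields of the BCH flash layout
 * registers encode the strength in steps of two, which is why the
 * geometry code only ever produces even strengths (the round_up() and
 * round_down() to 2 earlier in this file).
 */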
1302
1303static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1304 int oob_required, int page)
1305{
1306 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1307 struct mtd_info *mtd = nand_to_mtd(chip);
1308 struct bch_geometry *geo = &this->bch_geometry;
1309 unsigned int max_bitflips;
1310 int ret;
1311
1312 gpmi_bch_layout_std(this);
1313 this->bch = true;
1314
1315 ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1316 if (ret)
1317 return ret;
1318
1319 max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1320 geo->ecc_chunk_count,
1321 geo->auxiliary_status_offset);
1322
1323
1324 block_mark_swapping(this, buf, this->auxiliary_virt);
1325
1326 if (oob_required) {
		/*
		 * Deliver the OOB: fill the caller's buffer with set bits and
		 * copy in the block mark, which (after the swap above) is the
		 * first byte of the auxiliary buffer.  See the comment before
		 * gpmi_ecc_read_oob() for the OOB policy.
		 */
1337 memset(chip->oob_poi, ~0, mtd->oobsize);
1338 chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
1339 }
1340
1341 return max_bitflips;
1342}
1343
1344
1345static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1346 uint32_t len, uint8_t *buf, int page)
1347{
1348 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1349 struct bch_geometry *geo = &this->bch_geometry;
1350 int size = chip->ecc.size;
1351 int meta, n, page_size;
1352 unsigned int max_bitflips;
1353 unsigned int ecc_strength;
1354 int first, last, marker_pos;
1355 int ecc_parity_size;
1356 int col = 0;
1357 int ret;
1358
1359
1360 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1361
1362
1363 first = offs / size;
1364 last = (offs + len - 1) / size;
1365
1366 if (this->swap_block_mark) {
1367
1368
1369
1370
1371
1372
1373
1374 marker_pos = geo->block_mark_byte_offset / size;
1375 if (last >= marker_pos && first <= marker_pos) {
1376 dev_dbg(this->dev,
1377 "page:%d, first:%d, last:%d, marker at:%d\n",
1378 page, first, last, marker_pos);
1379 return gpmi_ecc_read_page(chip, buf, 0, page);
1380 }
1381 }
1382
1383 meta = geo->metadata_size;
1384 if (first) {
1385 col = meta + (size + ecc_parity_size) * first;
1386 meta = 0;
1387 buf = buf + first * size;
1388 }
1389
1390 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1391
1392 n = last - first + 1;
1393 page_size = meta + (size + ecc_parity_size) * n;
1394 ecc_strength = geo->ecc_strength >> 1;
1395
1396 this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1397 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1398 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1399 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1400 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1401
1402 this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1403 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1404 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1405 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1406
1407 this->bch = true;
1408
1409 ret = nand_read_page_op(chip, page, col, buf, page_size);
1410 if (ret)
1411 return ret;
1412
1413 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1414 page, offs, len, col, first, n, page_size);
1415
1416 max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1417
1418 return max_bitflips;
1419}
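
/*
 * Example for the subpage path with the geometry used earlier (512-byte
 * chunks, gf_len 13, strength 16, 10 bytes of metadata): a read of chunks
 * 2 and 3 has ecc_parity_size = 13 * 16 / 8 = 26 bytes, so the controller
 * is pointed at column 10 + (512 + 26) * 2 = 1086, the metadata size is
 * forced to 0 and the temporary BCH page size becomes (512 + 26) * 2 =
 * 1076 bytes, covering just those two chunks.
 */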
1420
1421static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1422 int oob_required, int page)
1423{
1424 struct mtd_info *mtd = nand_to_mtd(chip);
1425 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1426 struct bch_geometry *nfc_geo = &this->bch_geometry;
1427 int ret;
1428
1429 dev_dbg(this->dev, "ecc write page.\n");
1430
1431 gpmi_bch_layout_std(this);
1432 this->bch = true;
1433
1434 memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1435
1436 if (this->swap_block_mark) {
1437
1438
1439
1440
1441 memcpy(this->data_buffer_dma, buf, mtd->writesize);
1442 buf = this->data_buffer_dma;
1443 block_mark_swapping(this, this->data_buffer_dma,
1444 this->auxiliary_virt);
1445 }
1446
1447 ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1448
1449 return ret;
1450}

/*
 * OOB and block mark handling, in order of decreasing importance:
 *
 * 1) Nothing the caller does may imperil the factory block mark, which the
 *    controller's ECC layout would otherwise overwrite with data and
 *    parity.
 *
 * 2) On reads, the first OOB byte returned must reflect the true state of
 *    the block mark, wherever that mark physically lives (see
 *    block_mark_swapping() and the i.MX23 transcription scheme below).
 *
 * 3) ECC-based page reads return an OOB full of set bits apart from that
 *    first byte (see gpmi_ecc_read_page()), and ECC-based OOB writes are
 *    only allowed into genuinely free OOB space on SLC parts (see
 *    gpmi_ecc_write_oob()).
 *
 * 4) "Raw" accesses give a direct view of the physical page, with the
 *    conventional split between data and OOB and no ECC interference.
 *
 * gpmi_ecc_read_oob() therefore reads the conventional OOB area and, on
 * the i.MX23 where the mark has been transcribed into the first data
 * byte, re-reads that byte into OOB byte 0 so the reported mark is the
 * real one.
 */
1512static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1513{
1514 struct mtd_info *mtd = nand_to_mtd(chip);
1515 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1516 int ret;
1517
1518
1519 memset(chip->oob_poi, ~0, mtd->oobsize);
1520
1521
1522 ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1523 mtd->oobsize);
1524 if (ret)
1525 return ret;
1526
1527
1528
1529
1530
1531
1532 if (GPMI_IS_MX23(this)) {
1533
1534 ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1535 if (ret)
1536 return ret;
1537 }
1538
1539 return 0;
1540}
1541
1542static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1543{
1544 struct mtd_info *mtd = nand_to_mtd(chip);
1545 struct mtd_oob_region of = { };
1546
1547
1548 mtd_ooblayout_free(mtd, 0, &of);
1549 if (!of.length)
1550 return -EPERM;
1551
1552 if (!nand_is_slc(chip))
1553 return -EPERM;
1554
1555 return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1556 chip->oob_poi + of.offset, of.length);
1557}

/*
 * Read a NAND page without involving the ECC engine (no hardware
 * correction).  The GPMI/BCH layout stores the ECC bits interleaved with
 * the payload (metadata, then data chunk / parity pairs), and the parity
 * of a chunk is not byte aligned in general, so the page content has to
 * be parsed bit-wise to rebuild the conventional data and OOB buffers
 * expected by the MTD layer.
 */
1571static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1572 int oob_required, int page)
1573{
1574 struct mtd_info *mtd = nand_to_mtd(chip);
1575 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1576 struct bch_geometry *nfc_geo = &this->bch_geometry;
1577 int eccsize = nfc_geo->ecc_chunk_size;
1578 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1579 u8 *tmp_buf = this->raw_buffer;
1580 size_t src_bit_off;
1581 size_t oob_bit_off;
1582 size_t oob_byte_off;
1583 uint8_t *oob = chip->oob_poi;
1584 int step;
1585 int ret;
1586
1587 ret = nand_read_page_op(chip, page, 0, tmp_buf,
1588 mtd->writesize + mtd->oobsize);
1589 if (ret)
1590 return ret;
1591
1592
1593
1594
1595
1596
1597
1598
1599 if (this->swap_block_mark)
1600 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1601
1602
1603
1604
1605
1606 if (oob_required)
1607 memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1608
1609 oob_bit_off = nfc_geo->metadata_size * 8;
1610 src_bit_off = oob_bit_off;
1611
1612
1613 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1614 if (buf)
1615 nand_extract_bits(buf, step * eccsize, tmp_buf,
1616 src_bit_off, eccsize * 8);
1617 src_bit_off += eccsize * 8;
1618
1619
1620 if (step == nfc_geo->ecc_chunk_count - 1 &&
1621 (oob_bit_off + eccbits) % 8)
1622 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1623
1624 if (oob_required)
1625 nand_extract_bits(oob, oob_bit_off, tmp_buf,
1626 src_bit_off, eccbits);
1627
1628 src_bit_off += eccbits;
1629 oob_bit_off += eccbits;
1630 }
1631
1632 if (oob_required) {
1633 oob_byte_off = oob_bit_off / 8;
1634
1635 if (oob_byte_off < mtd->oobsize)
1636 memcpy(oob + oob_byte_off,
1637 tmp_buf + mtd->writesize + oob_byte_off,
1638 mtd->oobsize - oob_byte_off);
1639 }
1640
1641 return 0;
1642}

/*
 * Write a NAND page without involving the ECC engine (no hardware
 * computation of the parity).  This is the mirror of
 * gpmi_ecc_read_page_raw(): the conventional data and OOB buffers are
 * re-interleaved bit-wise into the controller's metadata / chunk / parity
 * layout before being programmed.
 */
1656static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1657 int oob_required, int page)
1658{
1659 struct mtd_info *mtd = nand_to_mtd(chip);
1660 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1661 struct bch_geometry *nfc_geo = &this->bch_geometry;
1662 int eccsize = nfc_geo->ecc_chunk_size;
1663 int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1664 u8 *tmp_buf = this->raw_buffer;
1665 uint8_t *oob = chip->oob_poi;
1666 size_t dst_bit_off;
1667 size_t oob_bit_off;
1668 size_t oob_byte_off;
1669 int step;
1670
1671
1672
1673
1674
1675
1676 if (!buf || !oob_required)
1677 memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1678
1679
1680
1681
1682
1683 memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1684 oob_bit_off = nfc_geo->metadata_size * 8;
1685 dst_bit_off = oob_bit_off;
1686
1687
1688 for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1689 if (buf)
1690 nand_extract_bits(tmp_buf, dst_bit_off, buf,
1691 step * eccsize * 8, eccsize * 8);
1692 dst_bit_off += eccsize * 8;
1693
1694
1695 if (step == nfc_geo->ecc_chunk_count - 1 &&
1696 (oob_bit_off + eccbits) % 8)
1697 eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1698
1699 if (oob_required)
1700 nand_extract_bits(tmp_buf, dst_bit_off, oob,
1701 oob_bit_off, eccbits);
1702
1703 dst_bit_off += eccbits;
1704 oob_bit_off += eccbits;
1705 }
1706
1707 oob_byte_off = oob_bit_off / 8;
1708
1709 if (oob_required && oob_byte_off < mtd->oobsize)
1710 memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1711 oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1712
1713
1714
1715
1716
1717
1718
1719
1720 if (this->swap_block_mark)
1721 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1722
1723 return nand_prog_page_op(chip, page, 0, tmp_buf,
1724 mtd->writesize + mtd->oobsize);
1725}
1726
1727static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
1728{
1729 return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
1730}
1731
1732static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
1733{
1734 return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
1735}
1736
1737static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
1738{
1739 struct mtd_info *mtd = nand_to_mtd(chip);
1740 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1741 int ret = 0;
1742 uint8_t *block_mark;
1743 int column, page, chipnr;
1744
1745 chipnr = (int)(ofs >> chip->chip_shift);
1746 nand_select_target(chip, chipnr);
1747
1748 column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1749
1750
1751 block_mark = this->data_buffer_dma;
1752 block_mark[0] = 0;
1753
1754
1755 page = (int)(ofs >> chip->page_shift);
1756
1757 ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1758
1759 nand_deselect_target(chip);
1760
1761 return ret;
1762}
1763
1764static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1765{
1766 struct boot_rom_geometry *geometry = &this->rom_geometry;
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776 geometry->stride_size_in_pages = 64;
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786 geometry->search_area_stride_exponent = 2;
1787 return 0;
1788}
1789
1790static const char *fingerprint = "STMP";
1791static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1792{
1793 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1794 struct device *dev = this->dev;
1795 struct nand_chip *chip = &this->nand;
1796 unsigned int search_area_size_in_strides;
1797 unsigned int stride;
1798 unsigned int page;
1799 u8 *buffer = nand_get_data_buf(chip);
1800 int found_an_ncb_fingerprint = false;
1801 int ret;
1802
1803
1804 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1805
1806 nand_select_target(chip, 0);
1807
1808
1809
1810
1811 dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1812
1813 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1814
1815 page = stride * rom_geo->stride_size_in_pages;
1816
1817 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1818
1819
1820
1821
1822
1823 ret = nand_read_page_op(chip, page, 12, buffer,
1824 strlen(fingerprint));
1825 if (ret)
1826 continue;
1827
1828
1829 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1830 found_an_ncb_fingerprint = true;
1831 break;
1832 }
1833
1834 }
1835
1836 nand_deselect_target(chip);
1837
1838 if (found_an_ncb_fingerprint)
1839 dev_dbg(dev, "\tFound a fingerprint\n");
1840 else
1841 dev_dbg(dev, "\tNo fingerprint found\n");
1842 return found_an_ncb_fingerprint;
1843}
1844
1845
1846static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1847{
1848 struct device *dev = this->dev;
1849 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1850 struct nand_chip *chip = &this->nand;
1851 struct mtd_info *mtd = nand_to_mtd(chip);
1852 unsigned int block_size_in_pages;
1853 unsigned int search_area_size_in_strides;
1854 unsigned int search_area_size_in_pages;
1855 unsigned int search_area_size_in_blocks;
1856 unsigned int block;
1857 unsigned int stride;
1858 unsigned int page;
1859 u8 *buffer = nand_get_data_buf(chip);
1860 int status;
1861
1862
1863 block_size_in_pages = mtd->erasesize / mtd->writesize;
1864 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1865 search_area_size_in_pages = search_area_size_in_strides *
1866 rom_geo->stride_size_in_pages;
1867 search_area_size_in_blocks =
1868 (search_area_size_in_pages + (block_size_in_pages - 1)) /
1869 block_size_in_pages;
1870
1871 dev_dbg(dev, "Search Area Geometry :\n");
1872 dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
1873 dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
1874 dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
1875
1876 nand_select_target(chip, 0);
1877
1878
1879 dev_dbg(dev, "Erasing the search area...\n");
1880
1881 for (block = 0; block < search_area_size_in_blocks; block++) {
1882
1883 dev_dbg(dev, "\tErasing block 0x%x\n", block);
1884 status = nand_erase_op(chip, block);
1885 if (status)
1886 dev_err(dev, "[%s] Erase failed.\n", __func__);
1887 }
1888
1889
1890 memset(buffer, ~0, mtd->writesize);
1891 memcpy(buffer + 12, fingerprint, strlen(fingerprint));
1892
1893
1894 dev_dbg(dev, "Writing NCB fingerprints...\n");
1895 for (stride = 0; stride < search_area_size_in_strides; stride++) {
1896
1897 page = stride * rom_geo->stride_size_in_pages;
1898
1899
1900 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
1901
1902 status = chip->ecc.write_page_raw(chip, buffer, 0, page);
1903 if (status)
1904 dev_err(dev, "[%s] Write failed.\n", __func__);
1905 }
1906
1907 nand_deselect_target(chip);
1908
1909 return 0;
1910}
1911
1912static int mx23_boot_init(struct gpmi_nand_data *this)
1913{
1914 struct device *dev = this->dev;
1915 struct nand_chip *chip = &this->nand;
1916 struct mtd_info *mtd = nand_to_mtd(chip);
1917 unsigned int block_count;
1918 unsigned int block;
1919 int chipnr;
1920 int page;
1921 loff_t byte;
1922 uint8_t block_mark;
1923 int ret = 0;
1924
1925
1926
1927
1928
1929
1930
1931 if (mx23_check_transcription_stamp(this))
1932 return 0;
1933
1934
1935
1936
1937
1938 dev_dbg(dev, "Transcribing bad block marks...\n");
1939
1940
1941 block_count = nanddev_eraseblocks_per_target(&chip->base);
1942
1943
1944
1945
1946
1947 for (block = 0; block < block_count; block++) {
1948
1949
1950
1951
1952 chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
1953 page = block << (chip->phys_erase_shift - chip->page_shift);
1954 byte = block << chip->phys_erase_shift;
1955
1956
1957 nand_select_target(chip, chipnr);
1958 ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
1959 1);
1960 nand_deselect_target(chip);
1961
1962 if (ret)
1963 continue;
1964
1965
1966
1967
1968
1969
1970 if (block_mark != 0xff) {
1971 dev_dbg(dev, "Transcribing mark in block %u\n", block);
1972 ret = chip->legacy.block_markbad(chip, byte);
1973 if (ret)
1974 dev_err(dev,
1975 "Failed to mark block bad with ret %d\n",
1976 ret);
1977 }
1978 }
1979
1980
1981 mx23_write_transcription_stamp(this);
1982 return 0;
1983}
1984
1985static int nand_boot_init(struct gpmi_nand_data *this)
1986{
1987 nand_boot_set_geometry(this);
1988
1989
1990 if (GPMI_IS_MX23(this))
1991 return mx23_boot_init(this);
1992 return 0;
1993}
1994
1995static int gpmi_set_geometry(struct gpmi_nand_data *this)
1996{
1997 int ret;
1998
1999
2000 gpmi_free_dma_buffer(this);
2001
2002
2003 ret = bch_set_geometry(this);
2004 if (ret) {
2005 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2006 return ret;
2007 }
2008
2009
2010 return gpmi_alloc_dma_buffer(this);
2011}
2012
2013static int gpmi_init_last(struct gpmi_nand_data *this)
2014{
2015 struct nand_chip *chip = &this->nand;
2016 struct mtd_info *mtd = nand_to_mtd(chip);
2017 struct nand_ecc_ctrl *ecc = &chip->ecc;
2018 struct bch_geometry *bch_geo = &this->bch_geometry;
2019 int ret;
2020
2021
2022 ret = gpmi_set_geometry(this);
2023 if (ret)
2024 return ret;
2025
2026
2027 ecc->read_page = gpmi_ecc_read_page;
2028 ecc->write_page = gpmi_ecc_write_page;
2029 ecc->read_oob = gpmi_ecc_read_oob;
2030 ecc->write_oob = gpmi_ecc_write_oob;
2031 ecc->read_page_raw = gpmi_ecc_read_page_raw;
2032 ecc->write_page_raw = gpmi_ecc_write_page_raw;
2033 ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
2034 ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
2035 ecc->mode = NAND_ECC_HW;
2036 ecc->size = bch_geo->ecc_chunk_size;
2037 ecc->strength = bch_geo->ecc_strength;
2038 mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
2039
2040
2041
2042
2043
2044
2045 if (GPMI_IS_MX6(this) &&
2046 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
2047 ecc->read_subpage = gpmi_ecc_read_subpage;
2048 chip->options |= NAND_SUBPAGE_READ;
2049 }
2050
2051 return 0;
2052}
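
/*
 * The subpage read capability enabled above relies on the per-chunk parity
 * being byte aligned: with the example geometry (gf_len 13, strength 16)
 * each chunk carries 13 * 16 = 208 parity bits = 26 whole bytes, so
 * (gf_len * ecc_strength) % 8 == 0 holds and gpmi_ecc_read_subpage() can
 * address individual chunks by column.
 */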
2053
2054static int gpmi_nand_attach_chip(struct nand_chip *chip)
2055{
2056 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2057 int ret;
2058
2059 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2060 chip->bbt_options |= NAND_BBT_NO_OOB;
2061
2062 if (of_property_read_bool(this->dev->of_node,
2063 "fsl,no-blockmark-swap"))
2064 this->swap_block_mark = false;
2065 }
2066 dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2067 this->swap_block_mark ? "en" : "dis");
2068
2069 ret = gpmi_init_last(this);
2070 if (ret)
2071 return ret;
2072
2073 chip->options |= NAND_SKIP_BBTSCAN;
2074
2075 return 0;
2076}
2077
2078static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2079{
2080 struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2081
2082 this->ntransfers++;
2083
2084 if (this->ntransfers == GPMI_MAX_TRANSFERS)
2085 return NULL;
2086
2087 return transfer;
2088}
2089
2090static struct dma_async_tx_descriptor *gpmi_chain_command(
2091 struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
2092{
2093 struct dma_chan *channel = get_dma_chan(this);
2094 struct dma_async_tx_descriptor *desc;
2095 struct gpmi_transfer *transfer;
2096 int chip = this->nand.cur_cs;
2097 u32 pio[3];
2098
2099
2100 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2101 | BM_GPMI_CTRL0_WORD_LENGTH
2102 | BF_GPMI_CTRL0_CS(chip, this)
2103 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2104 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
2105 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2106 | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
2107 pio[1] = 0;
2108 pio[2] = 0;
2109 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2110 DMA_TRANS_NONE, 0);
2111 if (!desc)
2112 return NULL;
2113
2114 transfer = get_next_transfer(this);
2115 if (!transfer)
2116 return NULL;
2117
2118 transfer->cmdbuf[0] = cmd;
2119 if (naddr)
2120 memcpy(&transfer->cmdbuf[1], addr, naddr);
2121
2122 sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
2123 dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
2124
2125 transfer->direction = DMA_TO_DEVICE;
2126
2127 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
2128 MXS_DMA_CTRL_WAIT4END);
2129 return desc;
2130}
2131
2132static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2133 struct gpmi_nand_data *this)
2134{
2135 struct dma_chan *channel = get_dma_chan(this);
2136 u32 pio[2];
2137
2138 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2139 | BM_GPMI_CTRL0_WORD_LENGTH
2140 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2141 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2142 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2143 | BF_GPMI_CTRL0_XFER_COUNT(0);
2144 pio[1] = 0;
2145
2146 return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2147 MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2148}
2149
2150static struct dma_async_tx_descriptor *gpmi_chain_data_read(
2151 struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
2152{
2153 struct dma_async_tx_descriptor *desc;
2154 struct dma_chan *channel = get_dma_chan(this);
2155 struct gpmi_transfer *transfer;
2156 u32 pio[6] = {};
2157
2158 transfer = get_next_transfer(this);
2159 if (!transfer)
2160 return NULL;
2161
2162 transfer->direction = DMA_FROM_DEVICE;
2163
2164 *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
2165 DMA_FROM_DEVICE);
2166
2167 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
2168 | BM_GPMI_CTRL0_WORD_LENGTH
2169 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2170 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2171 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2172 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2173
2174 if (this->bch) {
2175 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2176 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
2177 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2178 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2179 pio[3] = raw_len;
2180 pio[4] = transfer->sgl.dma_address;
2181 pio[5] = this->auxiliary_phys;
2182 }
2183
2184 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2185 DMA_TRANS_NONE, 0);
2186 if (!desc)
2187 return NULL;
2188
2189 if (!this->bch)
2190 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2191 DMA_DEV_TO_MEM,
2192 MXS_DMA_CTRL_WAIT4END);
2193
2194 return desc;
2195}
2196
2197static struct dma_async_tx_descriptor *gpmi_chain_data_write(
2198 struct gpmi_nand_data *this, const void *buf, int raw_len)
2199{
2200 struct dma_chan *channel = get_dma_chan(this);
2201 struct dma_async_tx_descriptor *desc;
2202 struct gpmi_transfer *transfer;
2203 u32 pio[6] = {};
2204
2205 transfer = get_next_transfer(this);
2206 if (!transfer)
2207 return NULL;
2208
2209 transfer->direction = DMA_TO_DEVICE;
2210
2211 prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
2212
2213 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2214 | BM_GPMI_CTRL0_WORD_LENGTH
2215 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2216 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2217 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2218 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2219
2220 if (this->bch) {
2221 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2222 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
2223 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
2224 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2225 pio[3] = raw_len;
2226 pio[4] = transfer->sgl.dma_address;
2227 pio[5] = this->auxiliary_phys;
2228 }
2229
2230 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2231 DMA_TRANS_NONE,
2232 (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
2233 if (!desc)
2234 return NULL;
2235
2236 if (!this->bch)
2237 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2238 DMA_MEM_TO_DEV,
2239 MXS_DMA_CTRL_WAIT4END);
2240
2241 return desc;
2242}
2243
2244static int gpmi_nfc_exec_op(struct nand_chip *chip,
2245 const struct nand_operation *op,
2246 bool check_only)
2247{
2248 const struct nand_op_instr *instr;
2249 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2250 struct dma_async_tx_descriptor *desc = NULL;
2251 int i, ret, buf_len = 0, nbufs = 0;
2252 u8 cmd = 0;
2253 void *buf_read = NULL;
2254 const void *buf_write = NULL;
2255 bool direct = false;
2256 struct completion *completion;
2257 unsigned long to;
2258
2259 if (check_only)
2260 return 0;
2261
2262 this->ntransfers = 0;
2263 for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2264 this->transfers[i].direction = DMA_NONE;
2265
2266 ret = pm_runtime_get_sync(this->dev);
2267 if (ret < 0)
2268 return ret;
2269
2270
2271
2272
2273
2274
2275
2276 if (this->hw.must_apply_timings) {
2277 this->hw.must_apply_timings = false;
2278 gpmi_nfc_apply_timings(this);
2279 }
2280
2281 dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2282
2283 for (i = 0; i < op->ninstrs; i++) {
2284 instr = &op->instrs[i];
2285
2286 nand_op_trace(" ", instr);
2287
2288 switch (instr->type) {
2289 case NAND_OP_WAITRDY_INSTR:
2290 desc = gpmi_chain_wait_ready(this);
2291 break;
2292 case NAND_OP_CMD_INSTR:
2293 cmd = instr->ctx.cmd.opcode;
2294
2295
2296
2297
2298
2299 if (i + 1 != op->ninstrs &&
2300 op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2301 continue;
2302
2303 desc = gpmi_chain_command(this, cmd, NULL, 0);
2304
2305 break;
2306 case NAND_OP_ADDR_INSTR:
2307 desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2308 instr->ctx.addr.naddrs);
2309 break;
2310 case NAND_OP_DATA_OUT_INSTR:
2311 buf_write = instr->ctx.data.buf.out;
2312 buf_len = instr->ctx.data.len;
2313 nbufs++;
2314
2315 desc = gpmi_chain_data_write(this, buf_write, buf_len);
2316
2317 break;
2318 case NAND_OP_DATA_IN_INSTR:
2319 if (!instr->ctx.data.len)
2320 break;
2321 buf_read = instr->ctx.data.buf.in;
2322 buf_len = instr->ctx.data.len;
2323 nbufs++;
2324
2325 desc = gpmi_chain_data_read(this, buf_read, buf_len,
2326 &direct);
2327 break;
2328 }
2329
2330 if (!desc) {
2331 ret = -ENXIO;
2332 goto unmap;
2333 }
2334 }
2335
2336 dev_dbg(this->dev, "%s setup done\n", __func__);
2337
2338 if (nbufs > 1) {
2339 dev_err(this->dev, "Multiple data instructions not supported\n");
2340 ret = -EINVAL;
2341 goto unmap;
2342 }
2343
2344 if (this->bch) {
2345 writel(this->bch_flashlayout0,
2346 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2347 writel(this->bch_flashlayout1,
2348 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2349 }
2350
2351 if (this->bch && buf_read) {
2352 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2353 this->resources.bch_regs + HW_BCH_CTRL_SET);
2354 completion = &this->bch_done;
2355 } else {
2356 desc->callback = dma_irq_callback;
2357 desc->callback_param = this;
2358 completion = &this->dma_done;
2359 }
2360
2361 init_completion(completion);
2362
2363 dmaengine_submit(desc);
2364 dma_async_issue_pending(get_dma_chan(this));
2365
2366 to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
2367 if (!to) {
2368 dev_err(this->dev, "DMA timeout, last DMA\n");
2369 gpmi_dump_info(this);
2370 ret = -ETIMEDOUT;
2371 goto unmap;
2372 }
2373
2374 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2375 this->resources.bch_regs + HW_BCH_CTRL_CLR);
2376 gpmi_clear_bch(this);
2377
2378 ret = 0;
2379
2380unmap:
2381 for (i = 0; i < this->ntransfers; i++) {
2382 struct gpmi_transfer *transfer = &this->transfers[i];
2383
2384 if (transfer->direction != DMA_NONE)
2385 dma_unmap_sg(this->dev, &transfer->sgl, 1,
2386 transfer->direction);
2387 }
2388
2389 if (!ret && buf_read && !direct)
2390 memcpy(buf_read, this->data_buffer_dma,
2391 gpmi_raw_len_to_len(this, buf_len));
2392
2393 this->bch = false;
2394
2395 pm_runtime_mark_last_busy(this->dev);
2396 pm_runtime_put_autosuspend(this->dev);
2397
2398 return ret;
2399}
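
/*
 * Summary of the exec_op path above: command and address instructions are
 * merged into a single CLE/ALE DMA descriptor (gpmi_chain_command()),
 * wait-ready becomes a WAIT_FOR_READY PIO descriptor, and at most one data
 * phase is chained per operation.  When this->bch is set the data phase is
 * routed through the BCH engine and completion is signalled by the BCH
 * interrupt; otherwise a plain DMA descriptor with a completion callback
 * is used.  On timeout the register state is dumped for debugging, and any
 * bounce-buffered read data are copied back only on success.
 */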
2400
2401static const struct nand_controller_ops gpmi_nand_controller_ops = {
2402 .attach_chip = gpmi_nand_attach_chip,
2403 .setup_data_interface = gpmi_setup_data_interface,
2404 .exec_op = gpmi_nfc_exec_op,
2405};
2406
2407static int gpmi_nand_init(struct gpmi_nand_data *this)
2408{
2409 struct nand_chip *chip = &this->nand;
2410 struct mtd_info *mtd = nand_to_mtd(chip);
2411 int ret;
2412
2413
2414 mtd->name = "gpmi-nand";
2415 mtd->dev.parent = this->dev;
2416
2417
2418 nand_set_controller_data(chip, this);
2419 nand_set_flash_node(chip, this->pdev->dev.of_node);
2420 chip->legacy.block_markbad = gpmi_block_markbad;
2421 chip->badblock_pattern = &gpmi_bbt_descr;
2422 chip->options |= NAND_NO_SUBPAGE_WRITE;
2423
2424
2425 this->swap_block_mark = !GPMI_IS_MX23(this);
2426
2427
2428
2429
2430
2431 this->bch_geometry.payload_size = 1024;
2432 this->bch_geometry.auxiliary_size = 128;
2433 ret = gpmi_alloc_dma_buffer(this);
2434 if (ret)
2435 goto err_out;
2436
2437 nand_controller_init(&this->base);
2438 this->base.ops = &gpmi_nand_controller_ops;
2439 chip->controller = &this->base;
2440
2441 ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
2442 if (ret)
2443 goto err_out;
2444
2445 ret = nand_boot_init(this);
2446 if (ret)
2447 goto err_nand_cleanup;
2448 ret = nand_create_bbt(chip);
2449 if (ret)
2450 goto err_nand_cleanup;
2451
2452 ret = mtd_device_register(mtd, NULL, 0);
2453 if (ret)
2454 goto err_nand_cleanup;
2455 return 0;
2456
2457err_nand_cleanup:
2458 nand_cleanup(chip);
2459err_out:
2460 gpmi_free_dma_buffer(this);
2461 return ret;
2462}
2463
2464static const struct of_device_id gpmi_nand_id_table[] = {
2465 {
2466 .compatible = "fsl,imx23-gpmi-nand",
2467 .data = &gpmi_devdata_imx23,
2468 }, {
2469 .compatible = "fsl,imx28-gpmi-nand",
2470 .data = &gpmi_devdata_imx28,
2471 }, {
2472 .compatible = "fsl,imx6q-gpmi-nand",
2473 .data = &gpmi_devdata_imx6q,
2474 }, {
2475 .compatible = "fsl,imx6sx-gpmi-nand",
2476 .data = &gpmi_devdata_imx6sx,
2477 }, {
2478 .compatible = "fsl,imx7d-gpmi-nand",
2479 .data = &gpmi_devdata_imx7d,
2480 }, {}
2481};
2482MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2483
2484static int gpmi_nand_probe(struct platform_device *pdev)
2485{
2486 struct gpmi_nand_data *this;
2487 const struct of_device_id *of_id;
2488 int ret;
2489
2490 this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
2491 if (!this)
2492 return -ENOMEM;
2493
2494 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
2495 if (of_id) {
2496 this->devdata = of_id->data;
2497 } else {
2498 dev_err(&pdev->dev, "Failed to find the right device id.\n");
2499 return -ENODEV;
2500 }
2501
2502 platform_set_drvdata(pdev, this);
2503 this->pdev = pdev;
2504 this->dev = &pdev->dev;
2505
2506 ret = acquire_resources(this);
2507 if (ret)
2508 goto exit_acquire_resources;
2509
2510 ret = __gpmi_enable_clk(this, true);
2511 if (ret)
2512 goto exit_acquire_resources;
2513
2514 pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2515 pm_runtime_use_autosuspend(&pdev->dev);
2516 pm_runtime_set_active(&pdev->dev);
2517 pm_runtime_enable(&pdev->dev);
2518 pm_runtime_get_sync(&pdev->dev);
2519
2520 ret = gpmi_init(this);
2521 if (ret)
2522 goto exit_nfc_init;
2523
2524 ret = gpmi_nand_init(this);
2525 if (ret)
2526 goto exit_nfc_init;
2527
2528 pm_runtime_mark_last_busy(&pdev->dev);
2529 pm_runtime_put_autosuspend(&pdev->dev);
2530
2531 dev_info(this->dev, "driver registered.\n");
2532
2533 return 0;
2534
2535exit_nfc_init:
2536 pm_runtime_put(&pdev->dev);
2537 pm_runtime_disable(&pdev->dev);
2538 release_resources(this);
2539exit_acquire_resources:
2540
2541 return ret;
2542}
2543
2544static int gpmi_nand_remove(struct platform_device *pdev)
2545{
2546 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2547 struct nand_chip *chip = &this->nand;
2548 int ret;
2549
2550 pm_runtime_put_sync(&pdev->dev);
2551 pm_runtime_disable(&pdev->dev);
2552
2553 ret = mtd_device_unregister(nand_to_mtd(chip));
2554 WARN_ON(ret);
2555 nand_cleanup(chip);
2556 gpmi_free_dma_buffer(this);
2557 release_resources(this);
2558 return 0;
2559}
2560
2561#ifdef CONFIG_PM_SLEEP
2562static int gpmi_pm_suspend(struct device *dev)
2563{
2564 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2565
2566 release_dma_channels(this);
2567 return 0;
2568}
2569
2570static int gpmi_pm_resume(struct device *dev)
2571{
2572 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2573 int ret;
2574
2575 ret = acquire_dma_channels(this);
2576 if (ret < 0)
2577 return ret;
2578
2579
2580 ret = gpmi_init(this);
2581 if (ret) {
2582 dev_err(this->dev, "Error setting GPMI : %d\n", ret);
2583 return ret;
2584 }
2585
2586
2587 if (this->hw.clk_rate)
2588 this->hw.must_apply_timings = true;
2589
2590
2591 ret = bch_set_geometry(this);
2592 if (ret) {
2593 dev_err(this->dev, "Error setting BCH : %d\n", ret);
2594 return ret;
2595 }
2596
2597 return 0;
2598}
2599#endif
2600
2601static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2602{
2603 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2604
2605 return __gpmi_enable_clk(this, false);
2606}
2607
2608static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2609{
2610 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2611
2612 return __gpmi_enable_clk(this, true);
2613}
2614
2615static const struct dev_pm_ops gpmi_pm_ops = {
2616 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2617 SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
2618};
2619
2620static struct platform_driver gpmi_nand_driver = {
2621 .driver = {
2622 .name = "gpmi-nand",
2623 .pm = &gpmi_pm_ops,
2624 .of_match_table = gpmi_nand_id_table,
2625 },
2626 .probe = gpmi_nand_probe,
2627 .remove = gpmi_nand_remove,
2628};
2629module_platform_driver(gpmi_nand_driver);
2630
2631MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2632MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2633MODULE_LICENSE("GPL");
2634