1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/slab.h>
24
25#include "gpmi-nand.h"
26#include "gpmi-regs.h"
27#include "bch-regs.h"
28
/*
 * Default timing limits used by gpmi_nfc_compute_hardware_timing().
 *
 * The cycle/factor maxima are derived from the widths of the corresponding
 * GPMI register fields (mask shifted down by its bit position), so the
 * computed timings can never overflow the registers they are written to.
 * The DLL limits (32 ns period, 16 ns delay) are fixed hardware properties;
 * presumably taken from the i.MX reference manual — confirm there.
 */
static struct timing_threshod timing_default_threshold = {
	.max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
						BP_GPMI_TIMING0_DATA_SETUP),
	.internal_data_setup_in_ns = 0,
	.max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
						BP_GPMI_CTRL1_RDN_DELAY),
	.max_dll_clock_period_in_ns = 32,
	.max_dll_delay_in_ns = 16,
};
38
39#define MXS_SET_ADDR 0x4
40#define MXS_CLR_ADDR 0x8
41
42
43
44
45
46static int clear_poll_bit(void __iomem *addr, u32 mask)
47{
48 int timeout = 0x400;
49
50
51 writel(mask, addr + MXS_CLR_ADDR);
52
53
54
55
56
57 udelay(1);
58
59
60 while ((readl(addr) & mask) && --timeout)
61 ;
62
63 return !timeout;
64}
65
66#define MODULE_CLKGATE (1 << 30)
67#define MODULE_SFTRST (1 << 31)
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * Reset an MXS IP block through its SFTRST/CLKGATE control bits.
 *
 * Implements the documented block-reset sequence:
 *   1. clear SFTRST and wait for it to read back clear,
 *   2. ungate the block clock,
 *   3. unless @just_enable, assert SFTRST and wait for the hardware to
 *      gate the clock (CLKGATE set), confirming the reset latched,
 *   4. clear SFTRST and CLKGATE again, leaving the block running.
 *
 * @reset_addr:  base of the block's CTRL register (SCT aliases follow it)
 * @just_enable: skip the actual soft reset, only ungate/enable the block
 *               (needed on MX23 BCH, which must not be soft-reset)
 *
 * Returns 0 on success or -ETIMEDOUT if any poll loop expires.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* Clear and poll SFTRST. */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* Clear CLKGATE so the block is clocked. */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* Set SFTRST to reset the block. */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* Wait until the clock is gated, proving the reset took. */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			;
		if (unlikely(!timeout))
			goto error;
	}

	/* Bring the block out of reset ... */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* ... and ungate its clock again. */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
126
127static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
128{
129 struct clk *clk;
130 int ret;
131 int i;
132
133 for (i = 0; i < GPMI_CLK_MAX; i++) {
134 clk = this->resources.clock[i];
135 if (!clk)
136 break;
137
138 if (v) {
139 ret = clk_prepare_enable(clk);
140 if (ret)
141 goto err_clk;
142 } else {
143 clk_disable_unprepare(clk);
144 }
145 }
146 return 0;
147
148err_clk:
149 for (; i > 0; i--)
150 clk_disable_unprepare(this->resources.clock[i - 1]);
151 return ret;
152}
153
154#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
155#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
156
157int gpmi_init(struct gpmi_nand_data *this)
158{
159 struct resources *r = &this->resources;
160 int ret;
161
162 ret = gpmi_enable_clk(this);
163 if (ret)
164 goto err_out;
165 ret = gpmi_reset_block(r->gpmi_regs, false);
166 if (ret)
167 goto err_out;
168
169
170
171
172
173 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
174 if (ret)
175 goto err_out;
176
177
178
179 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
180
181
182 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
183 r->gpmi_regs + HW_GPMI_CTRL1_SET);
184
185
186 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
187
188
189 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
190
191
192
193
194
195 writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
196
197 gpmi_disable_clk(this);
198 return 0;
199err_out:
200 return ret;
201}
202
203
/*
 * Dump the GPMI and BCH register files plus the computed BCH geometry to
 * the kernel log (at error level) — debugging aid, typically called after
 * a DMA timeout or ECC failure.
 */
void gpmi_dump_info(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct bch_geometry *geo = &this->bch_geometry;
	u32 reg;
	int i;

	/* Walk the GPMI register file in 0x10 strides up past HW_GPMI_DEBUG. */
	dev_err(this->dev, "Show GPMI registers :\n");
	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
		reg = readl(r->gpmi_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}

	/* Same for the BCH register file, up past HW_BCH_VERSION. */
	dev_err(this->dev, "Show BCH registers :\n");
	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
		reg = readl(r->bch_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}
	dev_err(this->dev, "BCH Geometry :\n"
		"GF length : %u\n"
		"ECC Strength : %u\n"
		"Page Size in Bytes : %u\n"
		"Metadata Size in Bytes : %u\n"
		"ECC Chunk Size in Bytes: %u\n"
		"ECC Chunk Count : %u\n"
		"Payload Size in Bytes : %u\n"
		"Auxiliary Size in Bytes: %u\n"
		"Auxiliary Status Offset: %u\n"
		"Block Mark Byte Offset : %u\n"
		"Block Mark Bit Offset : %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->page_size,
		geo->metadata_size,
		geo->ecc_chunk_size,
		geo->ecc_chunk_count,
		geo->payload_size,
		geo->auxiliary_size,
		geo->auxiliary_status_offset,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}
247
248
249int bch_set_geometry(struct gpmi_nand_data *this)
250{
251 struct resources *r = &this->resources;
252 struct bch_geometry *bch_geo = &this->bch_geometry;
253 unsigned int block_count;
254 unsigned int block_size;
255 unsigned int metadata_size;
256 unsigned int ecc_strength;
257 unsigned int page_size;
258 unsigned int gf_len;
259 int ret;
260
261 if (common_nfc_set_geometry(this))
262 return !0;
263
264 block_count = bch_geo->ecc_chunk_count - 1;
265 block_size = bch_geo->ecc_chunk_size;
266 metadata_size = bch_geo->metadata_size;
267 ecc_strength = bch_geo->ecc_strength >> 1;
268 page_size = bch_geo->page_size;
269 gf_len = bch_geo->gf_len;
270
271 ret = gpmi_enable_clk(this);
272 if (ret)
273 goto err_out;
274
275
276
277
278
279
280
281
282
283 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
284 if (ret)
285 goto err_out;
286
287
288 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
289 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
290 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
291 | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
292 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
293 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
294
295 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
296 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
297 | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
298 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
299 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
300
301
302 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
303
304
305 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
306 r->bch_regs + HW_BCH_CTRL_SET);
307
308 gpmi_disable_clk(this);
309 return 0;
310err_out:
311 return ret;
312}
313
314
/*
 * Convert a duration in nanoseconds to clock cycles of the given period,
 * rounding up, and clamp the result to at least @min cycles.
 */
static unsigned int ns_to_cycles(unsigned int time,
			unsigned int period, unsigned int min)
{
	unsigned int cycles;

	/* Round up: any fraction of a period costs a whole cycle. */
	cycles = (time + period - 1) / period;

	/* Never report fewer cycles than the required minimum. */
	return (cycles > min) ? cycles : min;
}
323
324#define DEF_MIN_PROP_DELAY 5
325#define DEF_MAX_PROP_DELAY 9
326
/*
 * Compute safe GPMI hardware timings (data setup/hold, address setup,
 * read-data sample delay) from the target chip timings in this->timing
 * and the current GPMI clock rate, filling in @hw.
 *
 * Two strategies are used:
 *  - If the chip does not provide tREA/tRLOH/tRHOH, fall back to a simple
 *    model using only the requested sample delay.
 *  - Otherwise, model the "data eye" between the device driving valid data
 *    (max prop delay + tREA) and releasing it (min prop delay + tRHOH +
 *    data setup), place the sample point in its middle, and iteratively
 *    widen the eye / requantize until the sample point falls inside it.
 *
 * All delay arithmetic that can go negative is done in signed ints.
 * Always returns 0.
 */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
					struct gpmi_nfc_hardware_timing *hw)
{
	struct timing_threshod *nfc = &timing_default_threshold;
	struct resources *r = &this->resources;
	struct nand_chip *nand = &this->nand;
	struct nand_timing target = this->timing;
	bool improved_timing_is_available;
	unsigned long clock_frequency_in_hz;
	unsigned int clock_period_in_ns;
	bool dll_use_half_periods;
	unsigned int dll_delay_shift;
	unsigned int max_sample_delay_in_ns;
	unsigned int address_setup_in_cycles;
	unsigned int data_setup_in_ns;
	unsigned int data_setup_in_cycles;
	unsigned int data_hold_in_cycles;
	int ideal_sample_delay_in_ns;
	unsigned int sample_delay_factor;
	int tEYE;
	unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
	unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;

	/*
	 * Relax the target timings when several chips share the bus: more
	 * loading means slower edges, so pad setup/hold by 5 ns per extra
	 * pair of chips.
	 */
	if (nand->numchips > 2) {
		target.data_setup_in_ns += 10;
		target.data_hold_in_ns += 10;
		target.address_setup_in_ns += 10;
	} else if (nand->numchips > 1) {
		target.data_setup_in_ns += 5;
		target.data_hold_in_ns += 5;
		target.address_setup_in_ns += 5;
	}

	/* The eye-based model needs tREA, tRLOH and tRHOH from the chip. */
	improved_timing_is_available =
		(target.tREA_in_ns >= 0) &&
		(target.tRLOH_in_ns >= 0) &&
		(target.tRHOH_in_ns >= 0);

	/* Snapshot the current GPMI clock period (integer ns, truncated). */
	nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
	clock_period_in_ns = NSEC_PER_SEC / clock_frequency_in_hz;

	/*
	 * First-cut cycle counts: round up, with a minimum of one cycle for
	 * data setup/hold (zero address setup is allowed).
	 */
	data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns,
					clock_period_in_ns, 1);
	data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns,
					clock_period_in_ns, 1);
	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
					clock_period_in_ns, 0);

	/*
	 * The DLL delay unit is period/16 in half-period mode, period/8
	 * otherwise; slow clocks (period above half the DLL maximum) must
	 * use half-period mode. dll_delay_shift is the log2 divisor.
	 */
	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
		dll_use_half_periods = true;
		dll_delay_shift = 3 + 1;
	} else {
		dll_use_half_periods = false;
		dll_delay_shift = 3;
	}

	/*
	 * Largest usable sample delay: zero if the clock is too slow for
	 * the DLL at all, otherwise the register-field maximum converted
	 * to ns and capped by the DLL's absolute delay limit.
	 */
	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
		max_sample_delay_in_ns = 0;
	else {
		max_sample_delay_in_ns =
			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
							dll_delay_shift;

		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
	}

	/*
	 * Fallback model: use the requested sample delay directly, trading
	 * extra data-setup cycles for sample delay when it exceeds the
	 * achievable maximum.
	 */
	if (!improved_timing_is_available) {
		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
						nfc->internal_data_setup_in_ns;

		/*
		 * Each added setup cycle lets us reduce the needed sample
		 * delay by one clock period (never below zero).
		 */
		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

			data_setup_in_cycles++;
			ideal_sample_delay_in_ns -= clock_period_in_ns;

			if (ideal_sample_delay_in_ns < 0)
				ideal_sample_delay_in_ns = 0;

		}

		/* Quantize the delay into DLL units and clamp to the field. */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
					clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;

		goto return_results;
	}

	/* Eye-based model from here on. */
	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;

	/* Actual data setup time implied by the chosen cycle count. */
	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;

	/*
	 * tEYE = (earliest data release) - (latest data valid)
	 *      = (min prop + tRHOH + data setup) - (max prop + tREA).
	 */
	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
						(int)data_setup_in_ns;

	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;

	/* Widen a closed eye by adding data-setup cycles. */
	while ((tEYE <= 0) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		data_setup_in_cycles++;

		data_setup_in_ns += clock_period_in_ns;

		tEYE += clock_period_in_ns;
	}

	/*
	 * Ideal sample point: the center of the eye, measured from the
	 * reference edge (average of eye start and eye end).
	 */
	ideal_sample_delay_in_ns =
		((int)max_prop_delay_in_ns +
			(int)target.tREA_in_ns +
				(int)min_prop_delay_in_ns +
					(int)target.tRHOH_in_ns -
						(int)data_setup_in_ns) >> 1;

	if (ideal_sample_delay_in_ns < 0)
		ideal_sample_delay_in_ns = 0;

	/*
	 * If the ideal delay exceeds what the DLL can do, add setup cycles:
	 * each one widens the eye by a period and moves the eye center (and
	 * thus the needed delay) back by half a period.
	 */
	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
		(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		data_setup_in_cycles++;

		data_setup_in_ns += clock_period_in_ns;

		tEYE += clock_period_in_ns;

		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;
	}

	/* Quantize into DLL units and clamp to the register field. */
	sample_delay_factor =
		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

	if (sample_delay_factor > nfc->max_sample_delay_factor)
		sample_delay_factor = nfc->max_sample_delay_factor;

	/* Helpers for the final check: quantization error vs. eye radius. */
	#define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)

	#define QUANTIZED_DELAY \
		((int) ((sample_delay_factor * clock_period_in_ns) >> \
							dll_delay_shift))

	#define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))

	#define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))

	/*
	 * If quantization pushed the sample point out of the eye, nudge it
	 * back in: shave the factor when we overshot, otherwise widen the
	 * eye with another setup cycle and recompute the delay.
	 */
	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
		(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		if (QUANTIZED_DELAY > IDEAL_DELAY) {
			/* Overshot: step the factor down (if possible). */
			if (sample_delay_factor != 0)
				sample_delay_factor--;
			continue;
		}

		/* Undershot: widen the eye by one more setup cycle. */
		data_setup_in_cycles++;

		data_setup_in_ns += clock_period_in_ns;

		tEYE += clock_period_in_ns;

		/* Eye center moves back half a period per added cycle ... */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* ... plus a full period to bias toward smaller delays. */
		ideal_sample_delay_in_ns -= clock_period_in_ns;

		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;

		/* Requantize with the updated ideal delay. */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
					clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;
	}

	/* Hand the results back to the caller. */
return_results:
	hw->data_setup_in_cycles = data_setup_in_cycles;
	hw->data_hold_in_cycles = data_hold_in_cycles;
	hw->address_setup_in_cycles = address_setup_in_cycles;
	hw->use_half_periods = dll_use_half_periods;
	hw->sample_delay_factor = sample_delay_factor;
	hw->device_busy_timeout = GPMI_DEFAULT_BUSY_TIMEOUT;
	hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;

	return 0;
}
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
/*
 * Compute GPMI timings for asynchronous EDO modes 4 and 5.
 *
 * In EDO mode the controller runs at 80/100 MHz and uses fixed one-cycle
 * setup/hold; only the read-data sample delay has to be derived from the
 * clock rate, the mode's tREA and the chip's maximum chain delay.
 * All delay arithmetic below is done in tenths of a nanosecond (the *10
 * scaling) to keep precision with integer math.
 */
static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
			struct gpmi_nfc_hardware_timing *hw)
{
	struct resources *r = &this->resources;
	unsigned long rate = clk_get_rate(r->clock[0]);
	int mode = this->timing_mode;
	int dll_threshold = this->devdata->max_chain_delay;
	unsigned long delay;
	unsigned long clk_period;
	int t_rea;
	int c = 4;	/* fixed controller latency, in ns (scaled below) */
	int t_rp;
	int rp;

	/* Fixed cycle counts for EDO; mode 5 needs one address-setup cycle. */
	hw->data_setup_in_cycles = 1;
	hw->data_hold_in_cycles = 1;
	hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);

	/* Busy timeout, in 4096-cycle units (register granularity). */
	hw->device_busy_timeout = 0x9000;

	/* No extra write-strobe delay in EDO mode. */
	hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;

	/*
	 * Switch to 0.1 ns units: clock period, DLL threshold, tREA (16 ns
	 * for mode 5, 20 ns for mode 4 — ONFI values) and the latency c.
	 */
	clk_period = NSEC_PER_SEC / (rate / 10);
	dll_threshold *= 10;
	t_rea = ((mode == 5) ? 16 : 20) * 10;
	c *= 10;

	/* Read pulse width: one clock period. */
	t_rp = clk_period * 1;

	/* Half-period DLL mode when the clock is slower than the chain. */
	if (clk_period > dll_threshold) {
		hw->use_half_periods = 1;
		rp = clk_period / 2;
	} else {
		hw->use_half_periods = 0;
		rp = clk_period;
	}

	/*
	 * delay = (tREA + c - tRP) * 8 / rp, computed with one extra *10
	 * then rounded to the nearest integer by the (+5)/10 step.
	 */
	delay = (((t_rea + c - t_rp) * 8) * 10) / rp;
	delay = (delay + 5) / 10;

	hw->sample_delay_factor = delay;
}
917
/*
 * Switch the NAND chip and the controller clock into asynchronous EDO
 * timing mode @mode (4 or 5).
 *
 * Sequence: SET FEATURES the ONFI timing-mode feature, read it back to
 * confirm the chip accepted it, then raise the GPMI clock to 100 MHz
 * (mode 5) or 80 MHz (mode 4) and force the timings to be recomputed on
 * the next gpmi_begin() by clearing GPMI_TIMING_INIT_OK.
 *
 * Returns 0 on success, -ENOMEM if the feature buffer cannot be
 * allocated, -EINVAL if the chip rejects the mode.
 */
static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
{
	struct resources *r = &this->resources;
	struct nand_chip *nand = &this->nand;
	struct mtd_info *mtd = &this->mtd;
	uint8_t *feature;
	unsigned long rate;
	int ret;

	feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
	if (!feature)
		return -ENOMEM;

	nand->select_chip(mtd, 0);

	/* [1] send SET FEATURE command to NAND */
	feature[0] = mode;
	ret = nand->onfi_set_features(mtd, nand,
				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
	if (ret)
		goto err_out;

	/* [2] read it back to make sure the NAND really took the mode */
	memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
	ret = nand->onfi_get_features(mtd, nand,
				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
	if (ret || feature[0] != mode)
		goto err_out;

	nand->select_chip(mtd, -1);

	/* [3] set the main IO clock: 100 MHz for mode 5, 80 MHz for mode 4 */
	rate = (mode == 5) ? 100000000 : 80000000;
	clk_set_rate(r->clock[0], rate);

	/* Let the next gpmi_begin() redo the timing setup. */
	this->flags &= ~GPMI_TIMING_INIT_OK;

	this->flags |= GPMI_ASYNC_EDO_ENABLED;
	this->timing_mode = mode;
	kfree(feature);
	dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
	return 0;

err_out:
	nand->select_chip(mtd, -1);
	kfree(feature);
	dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
	return -EINVAL;
}
968
969int gpmi_extra_init(struct gpmi_nand_data *this)
970{
971 struct nand_chip *chip = &this->nand;
972
973
974 if (GPMI_IS_MX6(this) && chip->onfi_version) {
975 int mode = onfi_get_async_timing_mode(chip);
976
977
978 if (mode & ONFI_TIMING_MODE_5)
979 mode = 5;
980 else if (mode & ONFI_TIMING_MODE_4)
981 mode = 4;
982 else
983 return 0;
984
985 return enable_edo_mode(this, mode);
986 }
987 return 0;
988}
989
990
/*
 * Begin a GPMI access window: enable the controller clocks and, on the
 * first call (or after the timings were invalidated), compute and program
 * the hardware timing registers and train the DLL.
 *
 * Note: every successful exit deliberately leaves the clocks enabled —
 * gpmi_end() is the matching disable.
 */
void gpmi_begin(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int clock_period_in_ns;
	uint32_t reg;
	unsigned int dll_wait_time_in_us;
	struct gpmi_nfc_hardware_timing hw;
	int ret;

	/* Enable the clock. */
	ret = gpmi_enable_clk(this);
	if (ret) {
		dev_err(this->dev, "We failed in enable the clk\n");
		goto err_out;
	}

	/* Timings already programmed? Then we are done (clocks stay on). */
	if (this->flags & GPMI_TIMING_INIT_OK)
		return;
	this->flags |= GPMI_TIMING_INIT_OK;

	if (this->flags & GPMI_ASYNC_EDO_ENABLED)
		gpmi_compute_edo_timing(this, &hw);
	else
		gpmi_nfc_compute_hardware_timing(this, &hw);

	/* [1] Set HW_GPMI_TIMING0: address setup, data hold, data setup. */
	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);

	writel(reg, gpmi_regs + HW_GPMI_TIMING0);

	/* [2] Set the busy timeout. */
	writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
		gpmi_regs + HW_GPMI_TIMING1);

	/*
	 * [3] Select the write-strobe delay (clear the field first, then
	 * set the chosen value).
	 */
	writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
					gpmi_regs + HW_GPMI_CTRL1_SET);

	/* [4] DLL reset: disable it before reconfiguring the sample delay. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Clear out the previous sample delay and half-period settings. */
	reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
	writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* No sample delay requested: leave the DLL off and return. */
	if (!hw.sample_delay_factor)
		return;

	/* Program the new sample delay (and half-period mode if needed). */
	reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
		| BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);

	writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Re-enable the DLL. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * Wait for the DLL to lock: 64 clock cycles, converted to
	 * microseconds (at least 1 us).
	 */
	clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;

	if (!dll_wait_time_in_us)
		dll_wait_time_in_us = 1;

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_in_us);

err_out:
	return;
}
1073
/* End a GPMI access window: release the clocks taken by gpmi_begin(). */
void gpmi_end(struct gpmi_nand_data *this)
{
	gpmi_disable_clk(this);
}
1078
1079
/* Acknowledge a BCH completion interrupt by clearing its status bit. */
void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
1085
1086
1087int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
1088{
1089 struct resources *r = &this->resources;
1090 uint32_t mask = 0;
1091 uint32_t reg = 0;
1092
1093 if (GPMI_IS_MX23(this)) {
1094 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
1095 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
1096 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
1097
1098
1099
1100
1101 if (GPMI_IS_MX6(this))
1102 chip = 0;
1103
1104
1105 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
1106 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
1107 } else
1108 dev_err(this->dev, "unknow arch.\n");
1109 return reg & mask;
1110}
1111
/*
 * Record the DMA operation about to start, keeping the previous one in
 * last_dma_type (used elsewhere for diagnostics, presumably — confirm).
 */
static inline void set_dma_type(struct gpmi_nand_data *this,
					enum dma_ops_type type)
{
	this->last_dma_type = this->dma_type;
	this->dma_type = type;
}
1118
/*
 * Send the buffered command/address bytes (this->cmd_buffer, length
 * this->command_length) to the current chip.
 *
 * Builds a two-descriptor DMA chain: [1] a PIO descriptor programming
 * CTRL0 for a CLE write with address increment, [2] the command bytes
 * themselves from cmd_sgl. Returns 0 on success or -EINVAL if a
 * descriptor cannot be prepared.
 *
 * NOTE(review): the dma_map_sg() return value is not checked, and no
 * matching unmap is visible in this block — presumably handled by the
 * DMA completion path elsewhere; confirm.
 */
int gpmi_send_command(struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	int chip = this->current_chip;
	u32 pio[3];

	/* [1] send out the PIO words to set up CTRL0 for the CLE phase */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
	pio[1] = pio[2] = 0;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] send out the COMMAND + ADDRESS bytes from cmd_buffer */
	sgl = &this->cmd_sgl;

	sg_init_one(sgl, this->cmd_buffer, this->command_length);
	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel,
				sgl, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] submit the DMA (no BCH interrupt expected for raw commands) */
	set_dma_type(this, DMA_FOR_COMMAND);
	return start_dma_without_bch_irq(this, desc);
}
1157
/*
 * Write this->upper_len raw data bytes (no ECC) to the current chip.
 *
 * Two-descriptor DMA chain: [1] PIO descriptor programming CTRL0 for a
 * data write, [2] the payload from data_sgl (mapped by
 * prepare_data_dma()). Returns 0 on success or -EINVAL if a descriptor
 * cannot be prepared.
 */
int gpmi_send_data(struct gpmi_nand_data *this)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	uint32_t command_mode;
	uint32_t address;
	u32 pio[2];

	/* [1] PIO words: CTRL0 for a plain data write */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] send the data payload out of data_sgl */
	prepare_data_dma(this, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
					1, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] submit the DMA (no BCH involvement for raw writes) */
	set_dma_type(this, DMA_FOR_WRITE_DATA);
	return start_dma_without_bch_irq(this, desc);
}
1195
/*
 * Read this->upper_len raw data bytes (no ECC) from the current chip.
 *
 * Two-descriptor DMA chain: [1] PIO descriptor programming CTRL0 for a
 * data read, [2] the incoming bytes into data_sgl (mapped by
 * prepare_data_dma()). Returns 0 on success or -EINVAL if a descriptor
 * cannot be prepared.
 */
int gpmi_read_data(struct gpmi_nand_data *this)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[2];

	/* [1] PIO words: CTRL0 for a plain data read */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] read the data into data_sgl */
	prepare_data_dma(this, DMA_FROM_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
					1, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] submit the DMA (no BCH involvement for raw reads) */
	set_dma_type(this, DMA_FOR_READ_DATA);
	return start_dma_without_bch_irq(this, desc);
}
1229
/*
 * Write one page through the BCH encoder.
 *
 * @payload:   DMA address of the page data
 * @auxiliary: DMA address of the metadata/auxiliary buffer
 *
 * A single six-word PIO descriptor arms CTRL0 (XFER_COUNT is 0 — the BCH
 * block supplies the data) and the ECCCTRL/ECCCOUNT/PAYLOAD/AUXILIARY
 * words that start the encode. Completion is signalled by the BCH
 * interrupt, hence start_dma_with_bch_irq(). Returns 0 on success or
 * -EINVAL if the descriptor cannot be prepared.
 */
int gpmi_send_page(struct gpmi_nand_data *this,
			dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* A DMA descriptor that does an ECC page read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
				BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;

	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
1274
/*
 * Read one page through the BCH decoder.
 *
 * @payload:   DMA address for the decoded page data
 * @auxiliary: DMA address for the metadata/status buffer
 *
 * Three chained descriptors:
 *   [1] wait for ready (2 PIO words),
 *   [2] the ECC read itself (6 PIO words arming ECCCTRL + buffers),
 *   [3] a second wait-for-ready that also disables the ECC block
 *       (pio[2] = 0 clears ECCCTRL) so later raw accesses work.
 * Completion is signalled by the BCH interrupt. Returns 0 on success or
 * -EINVAL if any descriptor cannot be prepared.
 */
int gpmi_read_page(struct gpmi_nand_data *this,
				dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* [1] Wait for the chip to report ready. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 2,
				DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] Enable the BCH block and read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
		| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] Disable the BCH block (pio[2] = 0 clears ECCCTRL). */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
	pio[1] = 0;
	pio[2] = 0;
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 3,
				DMA_TRANS_NONE,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [4] submit the DMA; the BCH interrupt signals completion */
	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
/**
 * gpmi_copy_bits - copy an arbitrary bit range between two buffers
 * @dst: destination buffer
 * @dst_bit_off: bit offset into @dst at which to start writing
 * @src: source buffer
 * @src_bit_off: bit offset into @src at which to start reading
 * @nbits: number of bits to copy
 *
 * Copies @nbits bits starting at bit @src_bit_off of @src into @dst at
 * bit @dst_bit_off, preserving the bits of @dst outside the written
 * range. Bit 0 is the least-significant bit of a byte. A 32-bit staging
 * buffer (src_buffer, with bits_in_src_buffer tracking its fill level)
 * is used to realign the stream between the two offsets; when both ends
 * are byte-aligned the bulk is a plain memcpy.
 */
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
		const u8 *src, size_t src_bit_off,
		size_t nbits)
{
	size_t i;
	size_t nbytes;
	u32 src_buffer = 0;
	size_t bits_in_src_buffer = 0;

	if (!nbits)
		return;

	/*
	 * Normalize both offsets: advance the pointers to the containing
	 * byte and keep only the sub-byte offset (0..7).
	 */
	src += src_bit_off / 8;
	src_bit_off %= 8;

	dst += dst_bit_off / 8;
	dst_bit_off %= 8;

	/*
	 * Load the leading partial source byte into the staging buffer so
	 * the rest of the copy can read whole bytes from @src.
	 */
	if (src_bit_off) {
		src_buffer = src[0] >> src_bit_off;
		if (nbits >= (8 - src_bit_off)) {
			bits_in_src_buffer += 8 - src_bit_off;
		} else {
			/* Fewer bits requested than remain in this byte. */
			src_buffer &= GENMASK(nbits - 1, 0);
			bits_in_src_buffer += nbits;
		}
		nbits -= bits_in_src_buffer;
		src++;
	}

	/* Whole source bytes still to be consumed. */
	nbytes = nbits / 8;

	/* Align the destination to a byte boundary. */
	if (dst_bit_off) {
		/* Top the staging buffer up so it can fill dst's first byte. */
		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
			src_buffer |= src[0] << bits_in_src_buffer;
			bits_in_src_buffer += 8;
			src++;
			nbytes--;
		}

		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
			/* Merge with the preserved low bits of dst[0]. */
			dst[0] &= GENMASK(dst_bit_off - 1, 0);
			dst[0] |= src_buffer << dst_bit_off;
			src_buffer >>= (8 - dst_bit_off);
			bits_in_src_buffer -= (8 - dst_bit_off);
			dst_bit_off = 0;
			dst++;
			/* Flush a second full byte if the staging holds one. */
			if (bits_in_src_buffer > 7) {
				bits_in_src_buffer -= 8;
				dst[0] = src_buffer;
				dst++;
				src_buffer >>= 8;
			}
		}
	}

	if (!bits_in_src_buffer && !dst_bit_off) {
		/*
		 * Both streams are now byte-aligned: copy the bulk with
		 * memcpy.
		 */
		if (nbytes)
			memcpy(dst, src, nbytes);
	} else {
		/*
		 * Still misaligned: stream each source byte through the
		 * staging buffer, emitting one realigned byte per step.
		 */
		for (i = 0; i < nbytes; i++) {
			src_buffer |= src[i] << bits_in_src_buffer;
			dst[i] = src_buffer;
			src_buffer >>= 8;
		}
	}

	dst += nbytes;
	src += nbytes;

	/* Trailing bits that did not form a whole source byte. */
	nbits %= 8;

	/* Nothing left in the source or the staging buffer: done. */
	if (!nbits && !bits_in_src_buffer)
		return;

	/* Pull the final partial source byte into the staging buffer. */
	if (nbits)
		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
				bits_in_src_buffer;
	bits_in_src_buffer += nbits;

	/*
	 * If the destination is still bit-shifted, fold the preserved low
	 * bits of *dst under the staged bits.
	 */
	if (dst_bit_off)
		src_buffer = (src_buffer << dst_bit_off) |
				(*dst & GENMASK(dst_bit_off - 1, 0));
	bits_in_src_buffer += dst_bit_off;

	/*
	 * The last destination byte may be written only partially; keep
	 * its untouched high bits by folding them into the staging buffer.
	 */
	nbytes = bits_in_src_buffer / 8;
	if (bits_in_src_buffer % 8) {
		src_buffer |= (dst[nbytes] &
				GENMASK(7, bits_in_src_buffer % 8)) <<
				(nbytes * 8);
		nbytes++;
	}

	/* Flush whatever remains in the staging buffer. */
	for (i = 0; i < nbytes; i++) {
		dst[i] = src_buffer;
		src_buffer >>= 8;
	}
}
1509