1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/delay.h>
22#include <linux/clk.h>
23#include <linux/slab.h>
24
25#include "gpmi-nand.h"
26#include "gpmi-regs.h"
27#include "bch-regs.h"
28
/*
 * Default timing limits for the controller.  The cycle/factor maxima are
 * derived from the widths of the corresponding GPMI register fields
 * (BM_*/BP_* macros from gpmi-regs.h), so they describe the largest
 * values the hardware can encode.  The DLL limits are fixed constants —
 * NOTE(review): presumably from the SoC reference manual; confirm.
 */
static struct timing_threshold timing_default_threshold = {
	.max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
						BP_GPMI_TIMING0_DATA_SETUP),
	.internal_data_setup_in_ns = 0,
	.max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
						BP_GPMI_CTRL1_RDN_DELAY),
	.max_dll_clock_period_in_ns = 32,
	.max_dll_delay_in_ns = 16,
};
38
39#define MXS_SET_ADDR 0x4
40#define MXS_CLR_ADDR 0x8
41
42
43
44
45
46static int clear_poll_bit(void __iomem *addr, u32 mask)
47{
48 int timeout = 0x400;
49
50
51 writel(mask, addr + MXS_CLR_ADDR);
52
53
54
55
56
57 udelay(1);
58
59
60 while ((readl(addr) & mask) && --timeout)
61 ;
62
63 return !timeout;
64}
65
66#define MODULE_CLKGATE (1 << 30)
67#define MODULE_SFTRST (1 << 31)
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * Reset an MXS-style hardware block through the SFTRST/CLKGATE bits of
 * its control register:
 *
 *  1. clear SFTRST and wait for it to read back as zero;
 *  2. ungate the block's clock;
 *  3. unless @just_enable, assert SFTRST and wait for hardware to set
 *     CLKGATE, which signals that the reset has latched;
 *  4. clear SFTRST and CLKGATE again, leaving the block running.
 *
 * Returns 0 on success or -ETIMEDOUT if any poll times out.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* Step 1: make sure the block is out of soft reset. */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* Step 2: ungate the clock. */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* Step 3: assert the soft reset... */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* ...and wait for hardware to gate the clock off, which
		 * indicates the reset has taken effect. */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			;
		if (unlikely(!timeout))
			goto error;
	}

	/* Step 4: release the soft reset... */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* ...and ungate the clock again so the block is usable. */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
126
127static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
128{
129 struct clk *clk;
130 int ret;
131 int i;
132
133 for (i = 0; i < GPMI_CLK_MAX; i++) {
134 clk = this->resources.clock[i];
135 if (!clk)
136 break;
137
138 if (v) {
139 ret = clk_prepare_enable(clk);
140 if (ret)
141 goto err_clk;
142 } else {
143 clk_disable_unprepare(clk);
144 }
145 }
146 return 0;
147
148err_clk:
149 for (; i > 0; i--)
150 clk_disable_unprepare(this->resources.clock[i - 1]);
151 return ret;
152}
153
154#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
155#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
156
/*
 * One-time initialization of the GPMI and BCH blocks: reset both, then
 * program CTRL1 with the driver's base operating mode.  The controller
 * clocks are held only for the duration of the setup.
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_init(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	int ret;

	ret = gpmi_enable_clk(this);
	if (ret)
		return ret;
	ret = gpmi_reset_block(r->gpmi_regs, false);
	if (ret)
		goto err_out;

	/*
	 * Reset the BCH block.  On i.MX23 only a "just enable" is done
	 * (no full soft reset) — NOTE(review): presumably an MX23
	 * erratum/limitation; confirm against the reference manual.
	 */
	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
	if (ret)
		goto err_out;

	/* Clear GPMI_MODE: select the NAND personality of the block. */
	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Set the IRQ/ready polarity. */
	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
				r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Assert DEV_RESET — per the bit name this drives the NAND
	 * device reset line; NOTE(review): confirm exact semantics. */
	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Route ECC through the BCH block. */
	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Decouple the chip select from the ready/busy signal. */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	gpmi_disable_clk(this);
	return 0;
err_out:
	gpmi_disable_clk(this);
	return ret;
}
203
204
/*
 * Dump the GPMI and BCH register files plus the computed BCH geometry
 * to the kernel log (error level, so it appears in bug reports).
 * Diagnostic only; no side effects beyond register reads.
 */
void gpmi_dump_info(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct bch_geometry *geo = &this->bch_geometry;
	u32 reg;
	int i;

	/* Walk the GPMI register file in 0x10-byte strides. */
	dev_err(this->dev, "Show GPMI registers :\n");
	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
		reg = readl(r->gpmi_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}

	/* Same for the BCH block, up to HW_BCH_VERSION. */
	dev_err(this->dev, "Show BCH registers :\n");
	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
		reg = readl(r->bch_regs + i * 0x10);
		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
	}
	dev_err(this->dev, "BCH Geometry :\n"
		"GF length : %u\n"
		"ECC Strength : %u\n"
		"Page Size in Bytes : %u\n"
		"Metadata Size in Bytes : %u\n"
		"ECC Chunk Size in Bytes: %u\n"
		"ECC Chunk Count : %u\n"
		"Payload Size in Bytes : %u\n"
		"Auxiliary Size in Bytes: %u\n"
		"Auxiliary Status Offset: %u\n"
		"Block Mark Byte Offset : %u\n"
		"Block Mark Bit Offset : %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->page_size,
		geo->metadata_size,
		geo->ecc_chunk_size,
		geo->ecc_chunk_count,
		geo->payload_size,
		geo->auxiliary_size,
		geo->auxiliary_status_offset,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}
248
249
250int bch_set_geometry(struct gpmi_nand_data *this)
251{
252 struct resources *r = &this->resources;
253 struct bch_geometry *bch_geo = &this->bch_geometry;
254 unsigned int block_count;
255 unsigned int block_size;
256 unsigned int metadata_size;
257 unsigned int ecc_strength;
258 unsigned int page_size;
259 unsigned int gf_len;
260 int ret;
261
262 if (common_nfc_set_geometry(this))
263 return !0;
264
265 block_count = bch_geo->ecc_chunk_count - 1;
266 block_size = bch_geo->ecc_chunk_size;
267 metadata_size = bch_geo->metadata_size;
268 ecc_strength = bch_geo->ecc_strength >> 1;
269 page_size = bch_geo->page_size;
270 gf_len = bch_geo->gf_len;
271
272 ret = gpmi_enable_clk(this);
273 if (ret)
274 return ret;
275
276
277
278
279
280
281
282
283
284 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
285 if (ret)
286 goto err_out;
287
288
289 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
290 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
291 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
292 | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
293 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
294 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
295
296 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
297 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
298 | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
299 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
300 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
301
302
303 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
304
305
306 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
307 r->bch_regs + HW_BCH_CTRL_SET);
308
309 gpmi_disable_clk(this);
310 return 0;
311err_out:
312 gpmi_disable_clk(this);
313 return ret;
314}
315
316
/*
 * Convert a duration in nanoseconds into a clock-cycle count, rounding
 * up, and clamp the result to a minimum of @min cycles.
 */
static unsigned int ns_to_cycles(unsigned int time,
			unsigned int period, unsigned int min)
{
	unsigned int cycles = (time + period - 1) / period;

	return cycles > min ? cycles : min;
}
325
326#define DEF_MIN_PROP_DELAY 5
327#define DEF_MAX_PROP_DELAY 9
328
/*
 * Compute the GPMI hardware timing for the conventional asynchronous
 * NAND interface: address/data setup and hold cycle counts, plus the
 * read-DLL configuration (half-period mode and sample delay factor).
 *
 * The target timings come from this->timing.  If the chip provided the
 * tREA/tRLOH/tRHOH figures, an "improved" computation centers the
 * sample point inside the data eye; otherwise a simple path uses the
 * requested fixed sample delay.  Results are stored into @hw.
 *
 * Always returns 0.
 */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
					struct gpmi_nfc_hardware_timing *hw)
{
	struct timing_threshold *nfc = &timing_default_threshold;
	struct resources *r = &this->resources;
	struct nand_chip *nand = &this->nand;
	struct nand_timing target = this->timing;
	bool improved_timing_is_available;
	unsigned long clock_frequency_in_hz;
	unsigned int clock_period_in_ns;
	bool dll_use_half_periods;
	unsigned int dll_delay_shift;
	unsigned int max_sample_delay_in_ns;
	unsigned int address_setup_in_cycles;
	unsigned int data_setup_in_ns;
	unsigned int data_setup_in_cycles;
	unsigned int data_hold_in_cycles;
	int ideal_sample_delay_in_ns;
	unsigned int sample_delay_factor;
	int tEYE;
	unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
	unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;

	/*
	 * Relax the target timings when several chips share the bus —
	 * presumably to compensate for the extra bus load: +5 ns for two
	 * chips, +10 ns for more.  NOTE(review): confirm the rationale.
	 */
	if (nand->numchips > 2) {
		target.data_setup_in_ns += 10;
		target.data_hold_in_ns += 10;
		target.address_setup_in_ns += 10;
	} else if (nand->numchips > 1) {
		target.data_setup_in_ns += 5;
		target.data_hold_in_ns += 5;
		target.address_setup_in_ns += 5;
	}

	/* The improved path needs all three chip-supplied figures
	 * (negative means "unknown"). */
	improved_timing_is_available =
		(target.tREA_in_ns >= 0) &&
		(target.tRLOH_in_ns >= 0) &&
		(target.tRHOH_in_ns >= 0);

	/*
	 * Derive the clock period.  Note this also caches the rate into
	 * the shared timing_default_threshold structure.
	 */
	nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
	clock_period_in_ns = NSEC_PER_SEC / clock_frequency_in_hz;

	/*
	 * Convert the target timings to cycle counts (setup/hold need at
	 * least one cycle; address setup may be zero).
	 */
	data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns,
					clock_period_in_ns, 1);
	data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns,
					clock_period_in_ns, 1);
	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
					clock_period_in_ns, 0);

	/*
	 * Configure the DLL.  For slow clocks (period above half the
	 * maximum DLL period) the DLL runs on half-periods, which adds
	 * one to the delay shift: delay = factor * period / 2^shift.
	 */
	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
		dll_use_half_periods = true;
		dll_delay_shift = 3 + 1;
	} else {
		dll_use_half_periods = false;
		dll_delay_shift = 3;
	}

	/*
	 * Compute the maximum usable sample delay: zero when the clock is
	 * too slow for the DLL at all, otherwise the largest delay the
	 * factor field can express, capped by the DLL's absolute limit.
	 */
	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
		max_sample_delay_in_ns = 0;
	else {
		max_sample_delay_in_ns =
			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
								dll_delay_shift;

		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
	}

	/*
	 * Simple path: no chip-specific read timings available.  Use the
	 * requested sample delay plus the controller's internal setup.
	 */
	if (!improved_timing_is_available) {
		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
						nfc->internal_data_setup_in_ns;

		/*
		 * If the ideal delay exceeds what the DLL can provide,
		 * trade sample delay for extra data-setup cycles (each
		 * added cycle reduces the needed delay by one period).
		 */
		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

			data_setup_in_cycles++;
			ideal_sample_delay_in_ns -= clock_period_in_ns;

			if (ideal_sample_delay_in_ns < 0)
				ideal_sample_delay_in_ns = 0;

		}

		/* Quantize the delay into a register factor and clamp. */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
					clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;

		/* Skip the improved computation entirely. */
		goto return_results;
	}

	/*
	 * Improved path: model the data eye from the propagation delays
	 * and the chip's tREA/tRHOH figures.
	 */
	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;

	/* Current data setup time, in ns. */
	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;

	/*
	 * tEYE is the width of the window during which read data is
	 * valid: it opens at max_prop_delay + tREA and closes at
	 * min_prop_delay + tRHOH + data_setup.
	 */
	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
						(int)data_setup_in_ns;

	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;

	/*
	 * Widen the eye by adding data-setup cycles until it is open,
	 * bounded by the register field's maximum.
	 */
	while ((tEYE <= 0) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/* Add a cycle of data setup. */
		data_setup_in_cycles++;
		/* ...and track the effect in ns. */
		data_setup_in_ns += clock_period_in_ns;
		/* Each added cycle widens the eye by one period. */
		tEYE += clock_period_in_ns;
	}

	/*
	 * The ideal sample point is the center of the eye, measured from
	 * the end of the data-setup interval (hence the subtraction).
	 */
	ideal_sample_delay_in_ns =
		((int)max_prop_delay_in_ns +
			(int)target.tREA_in_ns +
				(int)min_prop_delay_in_ns +
					(int)target.tRHOH_in_ns -
						(int)data_setup_in_ns) >> 1;

	/* The delay cannot be negative. */
	if (ideal_sample_delay_in_ns < 0)
		ideal_sample_delay_in_ns = 0;

	/*
	 * If the ideal delay exceeds the DLL's reach, add data-setup
	 * cycles: each one shifts the eye center back by half a period.
	 */
	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
		(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		/* Add a cycle of data setup. */
		data_setup_in_cycles++;

		data_setup_in_ns += clock_period_in_ns;

		tEYE += clock_period_in_ns;

		/*
		 * The eye center moves back by only half the added
		 * period, since the eye both widens and shifts.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* Clamp at zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;
	}

	/* Quantize the ideal delay into a register factor and clamp. */
	sample_delay_factor =
		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

	if (sample_delay_factor > nfc->max_sample_delay_factor)
		sample_delay_factor = nfc->max_sample_delay_factor;

	/*
	 * Helper macros to judge whether the quantized sample point
	 * still falls within the data eye.  NOTE(review): these are not
	 * #undef'd and remain visible for the rest of the file.
	 */
	#define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)

	#define QUANTIZED_DELAY \
		((int) ((sample_delay_factor * clock_period_in_ns) >> \
							dll_delay_shift))

	#define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))

	#define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))

	/*
	 * Quantization error may push the sample point outside the eye.
	 * While it is outside: if the quantized delay overshoots, step
	 * the factor down; otherwise widen the eye with another setup
	 * cycle and recompute everything.
	 */
	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		/* Overshoot: reduce the factor by one and re-test. */
		if (QUANTIZED_DELAY > IDEAL_DELAY) {
			if (sample_delay_factor != 0)
				sample_delay_factor--;
			continue;
		}

		/* Undershoot: add a data-setup cycle... */
		data_setup_in_cycles++;

		data_setup_in_ns += clock_period_in_ns;

		/* ...which widens the eye by one period... */
		tEYE += clock_period_in_ns;

		/*
		 * ...moves the eye center back by half a period, and the
		 * sample point itself back by a full period.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		ideal_sample_delay_in_ns -= clock_period_in_ns;

		/* Clamp at zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;

		/* Re-quantize and clamp the factor. */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
					clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;
	}

	/* Hand the results back to the caller. */
return_results:
	hw->data_setup_in_cycles = data_setup_in_cycles;
	hw->data_hold_in_cycles = data_hold_in_cycles;
	hw->address_setup_in_cycles = address_setup_in_cycles;
	hw->use_half_periods = dll_use_half_periods;
	hw->sample_delay_factor = sample_delay_factor;
	hw->device_busy_timeout = GPMI_DEFAULT_BUSY_TIMEOUT;
	hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;

	/* This computation cannot fail. */
	return 0;
}
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
861 struct gpmi_nfc_hardware_timing *hw)
862{
863 struct resources *r = &this->resources;
864 unsigned long rate = clk_get_rate(r->clock[0]);
865 int mode = this->timing_mode;
866 int dll_threshold = this->devdata->max_chain_delay;
867 unsigned long delay;
868 unsigned long clk_period;
869 int t_rea;
870 int c = 4;
871 int t_rp;
872 int rp;
873
874
875
876
877
878
879
880
881 hw->data_setup_in_cycles = 1;
882 hw->data_hold_in_cycles = 1;
883 hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
884
885
886 hw->device_busy_timeout = 0x9000;
887
888
889 hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
890
891
892
893
894
895 clk_period = NSEC_PER_SEC / (rate / 10);
896 dll_threshold *= 10;
897 t_rea = ((mode == 5) ? 16 : 20) * 10;
898 c *= 10;
899
900 t_rp = clk_period * 1;
901
902 if (clk_period > dll_threshold) {
903 hw->use_half_periods = 1;
904 rp = clk_period / 2;
905 } else {
906 hw->use_half_periods = 0;
907 rp = clk_period;
908 }
909
910
911
912
913
914 delay = (((t_rea + c - t_rp) * 8) * 10) / rp;
915 delay = (delay + 5) / 10;
916
917 hw->sample_delay_factor = delay;
918}
919
/*
 * Switch the NAND chip into ONFI asynchronous EDO timing mode @mode
 * (4 or 5).  The mode is written with the ONFI "set features" command
 * and read back for verification; on success the GPMI clock is raised
 * and the cached timing is invalidated so gpmi_begin() reprograms it.
 *
 * Returns 0 on success, -ENOMEM if the feature buffer cannot be
 * allocated, or -EINVAL if the feature exchange fails.
 */
static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
{
	struct resources *r = &this->resources;
	struct nand_chip *nand = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(nand);
	uint8_t *feature;
	unsigned long rate;
	int ret;

	feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
	if (!feature)
		return -ENOMEM;

	nand->select_chip(mtd, 0);

	/* [1] Ask the chip to change its timing mode. */
	feature[0] = mode;
	ret = nand->onfi_set_features(mtd, nand,
				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
	if (ret)
		goto err_out;

	/* [2] Read the mode back to confirm the chip accepted it. */
	memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
	ret = nand->onfi_get_features(mtd, nand,
				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
	if (ret || feature[0] != mode)
		goto err_out;

	nand->select_chip(mtd, -1);

	/* [3] Raise the clock: 100 MHz for mode 5, 80 MHz for mode 4. */
	rate = (mode == 5) ? 100000000 : 80000000;
	clk_set_rate(r->clock[0], rate);

	/* Invalidate the cached timing so gpmi_begin() recomputes it. */
	this->flags &= ~GPMI_TIMING_INIT_OK;

	this->flags |= GPMI_ASYNC_EDO_ENABLED;
	this->timing_mode = mode;
	kfree(feature);
	dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
	return 0;

err_out:
	nand->select_chip(mtd, -1);
	kfree(feature);
	dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
	return -EINVAL;
}
970
971int gpmi_extra_init(struct gpmi_nand_data *this)
972{
973 struct nand_chip *chip = &this->nand;
974
975
976 if (GPMI_IS_MX6(this) && chip->onfi_version) {
977 int mode = onfi_get_async_timing_mode(chip);
978
979
980 if (mode & ONFI_TIMING_MODE_5)
981 mode = 5;
982 else if (mode & ONFI_TIMING_MODE_4)
983 mode = 4;
984 else
985 return 0;
986
987 return enable_edo_mode(this, mode);
988 }
989 return 0;
990}
991
992
/*
 * Prepare the GPMI for a burst of I/O: enable the clocks and, on first
 * use (or after the timing cache was invalidated), program the timing
 * registers and the read DLL.  Paired with gpmi_end(), which drops the
 * clocks again; the early returns below deliberately leave the clocks
 * enabled for that reason.
 */
void gpmi_begin(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int   clock_period_in_ns;
	uint32_t       reg;
	unsigned int   dll_wait_time_in_us;
	struct gpmi_nfc_hardware_timing  hw;
	int ret;

	/* Enable the clock. */
	ret = gpmi_enable_clk(this);
	if (ret) {
		dev_err(this->dev, "We failed in enable the clk\n");
		goto err_out;
	}

	/* Only reprogram the timing when it has been invalidated. */
	if (this->flags & GPMI_TIMING_INIT_OK)
		return;
	this->flags |= GPMI_TIMING_INIT_OK;

	if (this->flags & GPMI_ASYNC_EDO_ENABLED)
		gpmi_compute_edo_timing(this, &hw);
	else
		gpmi_nfc_compute_hardware_timing(this, &hw);

	/* [1] Set HW_GPMI_TIMING0 (address setup, data setup/hold). */
	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles) |
		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);

	writel(reg, gpmi_regs + HW_GPMI_TIMING0);

	/* [2] Set the device busy timeout. */
	writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
		gpmi_regs + HW_GPMI_TIMING1);

	/* [3] Select the write-strobe delay (clear the field first,
	 * then set the computed value). */
	writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
					gpmi_regs + HW_GPMI_CTRL1_SET);

	/* [4] Disable the DLL before touching its configuration —
	 * NOTE(review): presumably required by the hardware; confirm. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Clear the current sample-delay and half-period settings. */
	reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
	writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* No sample delay needed: leave the DLL disabled (clocks stay
	 * on; gpmi_end() releases them). */
	if (!hw.sample_delay_factor)
		return;

	/* Program the new sample delay and half-period mode. */
	reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
		| BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);

	writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Re-enable the DLL. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * Wait for the DLL to lock before any I/O uses it: the code
	 * allows 64 clock periods (rounded up to at least 1 us).
	 */
	clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;

	if (!dll_wait_time_in_us)
		dll_wait_time_in_us = 1;

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_in_us);

err_out:
	return;
}
1075
/* Counterpart to gpmi_begin(): release the controller clocks. */
void gpmi_end(struct gpmi_nand_data *this)
{
	gpmi_disable_clk(this);
}
1080
1081
/* Acknowledge (clear) the BCH "complete" interrupt status bit. */
void gpmi_clear_bch(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
}
1087
1088
1089int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
1090{
1091 struct resources *r = &this->resources;
1092 uint32_t mask = 0;
1093 uint32_t reg = 0;
1094
1095 if (GPMI_IS_MX23(this)) {
1096 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
1097 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
1098 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
1099
1100
1101
1102
1103 if (GPMI_IS_MX6(this))
1104 chip = 0;
1105
1106
1107 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
1108 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
1109 } else
1110 dev_err(this->dev, "unknown arch.\n");
1111 return reg & mask;
1112}
1113
/* Record the upcoming DMA operation type, keeping the previous one. */
static inline void set_dma_type(struct gpmi_nand_data *this,
					enum dma_ops_type type)
{
	this->last_dma_type = this->dma_type;
	this->dma_type = type;
}
1120
/*
 * Send the queued command/address bytes (this->cmd_buffer) to the chip.
 * Two chained DMA descriptors: a PIO descriptor programming CTRL0 for a
 * CLE-space write with address increment, then a memory-to-device
 * transfer of the command buffer itself.
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_send_command(struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	int chip = this->current_chip;
	u32 pio[3];

	/* [1] PIO words: CTRL0 for a CLE write; the remaining two
	 * words are zero. */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
	pio[1] = pio[2] = 0;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] Send the command bytes themselves. */
	sgl = &this->cmd_sgl;

	sg_init_one(sgl, this->cmd_buffer, this->command_length);
	/* NOTE(review): the dma_map_sg() return value is not checked. */
	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel,
				sgl, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] Submit and wait for the DMA (no BCH interrupt involved). */
	set_dma_type(this, DMA_FOR_COMMAND);
	return start_dma_without_bch_irq(this, desc);
}
1159
/*
 * Write this->upper_len raw (non-ECC) bytes to the chip.  Descriptor
 * chain: PIO CTRL0 setup for a data-space write, then the payload from
 * this->data_sgl.
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_send_data(struct gpmi_nand_data *this)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	uint32_t command_mode;
	uint32_t address;
	u32 pio[2];

	/* [1] PIO words: data-space write of upper_len bytes. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] Send the data. */
	prepare_data_dma(this, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
					1, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] Submit and wait for the DMA. */
	set_dma_type(this, DMA_FOR_WRITE_DATA);
	return start_dma_without_bch_irq(this, desc);
}
1197
/*
 * Read this->upper_len raw (non-ECC) bytes from the chip.  Descriptor
 * chain: PIO CTRL0 setup for a data-space read, then a device-to-memory
 * transfer into this->data_sgl.
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_read_data(struct gpmi_nand_data *this)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[2];

	/* [1] PIO words: data-space read of upper_len bytes. */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] Read the data into the caller's buffer. */
	prepare_data_dma(this, DMA_FROM_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
					1, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] Submit and wait for the DMA. */
	set_dma_type(this, DMA_FOR_READ_DATA);
	return start_dma_without_bch_irq(this, desc);
}
1231
/*
 * Write one ECC-protected page.  A single PIO-only descriptor programs
 * CTRL0 for a zero-length data write while ECCCTRL enables the BCH
 * encoder; the engine fetches the payload and auxiliary buffers via the
 * DMA addresses placed in pio[4]/pio[5].  Completion is signalled by
 * the BCH interrupt (start_dma_with_bch_irq).
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_send_page(struct gpmi_nand_data *this,
			dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* BCH encode of both the page data and the auxiliary area. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
			BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	/* XFER_COUNT is zero: the BCH engine supplies the data. */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;

	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
1276
/*
 * Read one ECC-protected page.  Three chained descriptors:
 *  [1] wait for the chip's ready signal;
 *  [2] read the page with the BCH decoder enabled — the engine
 *      deposits the corrected data/auxiliary bytes at @payload and
 *      @auxiliary;
 *  [3] wait for ready again with ECCCTRL cleared, disabling the BCH
 *      block for subsequent plain transfers.
 * Completion is signalled by the BCH interrupt.
 *
 * Returns 0 on success or a negative error code.
 */
int gpmi_read_page(struct gpmi_nand_data *this,
				dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* [1] Wait for the chip to report ready. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 2,
				DMA_TRANS_NONE, 0);
	if (!desc)
		return -EINVAL;

	/* [2] Read the page through the BCH decoder. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

	pio[1] = 0;
	pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [3] Wait for ready again, with ECCCTRL zeroed to disable
	 * the BCH block. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
	pio[1] = 0;
	pio[2] = 0;
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 3,
				DMA_TRANS_NONE,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* [4] Submit and wait for the BCH completion interrupt. */
	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
/**
 * gpmi_copy_bits - copy bits from one memory region to another
 * @dst: destination buffer
 * @dst_bit_off: bit offset at which writing starts
 * @src: source buffer
 * @src_bit_off: bit offset at which reading starts
 * @nbits: number of bits to copy
 *
 * Copies @nbits bits between buffers that need not be byte aligned.
 * src_buffer acts as a small shift register holding up to 32 pending
 * bits.  @src and @dst must not overlap.
 */
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
		const u8 *src, size_t src_bit_off,
		size_t nbits)
{
	size_t i;
	size_t nbytes;
	u32 src_buffer = 0;
	size_t bits_in_src_buffer = 0;

	if (!nbits)
		return;

	/*
	 * Advance both pointers to the containing byte and keep only the
	 * sub-byte bit offsets.
	 */
	src += src_bit_off / 8;
	src_bit_off %= 8;

	dst += dst_bit_off / 8;
	dst_bit_off %= 8;

	/*
	 * Preload src_buffer with the bits available in the first source
	 * byte so that src becomes byte aligned.
	 */
	if (src_bit_off) {
		src_buffer = src[0] >> src_bit_off;
		if (nbits >= (8 - src_bit_off)) {
			bits_in_src_buffer += 8 - src_bit_off;
		} else {
			src_buffer &= GENMASK(nbits - 1, 0);
			bits_in_src_buffer += nbits;
		}
		nbits -= bits_in_src_buffer;
		src++;
	}

	/* Number of whole bytes that remain to be transferred. */
	nbytes = nbits / 8;

	/* Try to bring dst to a byte boundary as well. */
	if (dst_bit_off) {
		/* Top up the shift register if it cannot fill the first
		 * partial destination byte. */
		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
			src_buffer |= src[0] << bits_in_src_buffer;
			bits_in_src_buffer += 8;
			src++;
			nbytes--;
		}

		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
			/* Merge with the existing low bits of dst[0]. */
			dst[0] &= GENMASK(dst_bit_off - 1, 0);
			dst[0] |= src_buffer << dst_bit_off;
			src_buffer >>= (8 - dst_bit_off);
			bits_in_src_buffer -= (8 - dst_bit_off);
			dst_bit_off = 0;
			dst++;
			/* Flush a full byte if one is still buffered. */
			if (bits_in_src_buffer > 7) {
				bits_in_src_buffer -= 8;
				dst[0] = src_buffer;
				dst++;
				src_buffer >>= 8;
			}
		}
	}

	if (!bits_in_src_buffer && !dst_bit_off) {
		/*
		 * Both pointers are now byte aligned: the bulk can go
		 * through plain memcpy.
		 */
		if (nbytes)
			memcpy(dst, src, nbytes);
	} else {
		/*
		 * Source is still misaligned: funnel every byte through
		 * the shift register before storing it.
		 */
		for (i = 0; i < nbytes; i++) {
			src_buffer |= src[i] << bits_in_src_buffer;
			dst[i] = src_buffer;
			src_buffer >>= 8;
		}
	}

	dst += nbytes;
	src += nbytes;

	/*
	 * Less than a byte of source bits remains; compute how many.
	 */
	nbits %= 8;

	/*
	 * Nothing left in the source or the shift register: done.
	 */
	if (!nbits && !bits_in_src_buffer)
		return;

	/* Pull the final partial source byte into the shift register. */
	if (nbits)
		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
			bits_in_src_buffer;
	bits_in_src_buffer += nbits;

	/*
	 * If dst is still misaligned, prepend the existing low bits of
	 * the destination byte so they are preserved.
	 */
	if (dst_bit_off)
		src_buffer = (src_buffer << dst_bit_off) |
				(*dst & GENMASK(dst_bit_off - 1, 0));
	bits_in_src_buffer += dst_bit_off;

	/*
	 * Keep the untouched high bits of the last destination byte by
	 * folding them into the shift register before the final store.
	 */
	nbytes = bits_in_src_buffer / 8;
	if (bits_in_src_buffer % 8) {
		src_buffer |= (dst[nbytes] &
				GENMASK(7, bits_in_src_buffer % 8)) <<
				(nbytes * 8);
		nbytes++;
	}

	/* Drain the shift register into dst. */
	for (i = 0; i < nbytes; i++) {
		dst[i] = src_buffer;
		src_buffer >>= 8;
	}
}
1511