1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
23
24#include <linux/fs.h>
25#include <linux/ioport.h>
26#include <linux/timer.h>
27#include <linux/gpio.h>
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/scatterlist.h>
31#include <linux/mm.h>
32#include <linux/irq.h>
33#include <linux/slab.h>
34#include <linux/sched.h>
35
36
37
38
39
40
41
42
43
44
45
46#include <linux/../../arch/arm/plat-omap/include/plat/mux.h>
47#include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
48#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h"
49#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h"
50#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h"
51#include "../../../include/linux/westbridge/cyaserr.h"
52#include "../../../include/linux/westbridge/cyasregs.h"
53#include "../../../include/linux/westbridge/cyasdma.h"
54#include "../../../include/linux/westbridge/cyasintr.h"
55
56#define HAL_REV "1.1.0"
57
58
59
60
61#define PNAND_16BIT_MODE
62
63
64
65
66
67
68
69
70
71#define PNAND_LBD_READ_NO_PFE
72
73
74
75
76
77
78
79#define MAX_DRQ_LOOPS_IN_ISR 4
80
81
82
83
84
85
86
87
88
89
90
91
92
93 #define CYASSTORAGE_WRITE_EP_NUM (4)
94 #define CYASSTORAGE_READ_EP_NUM (8)
95
96
97
98
99
100#define CYASSTORAGE_MAX_XFER_SIZE (2*32768)
101
102
103
104
105#define HAL_DMA_PKT_SZ 512
106
107#define is_storage_e_p(ep) (((ep) == 2) || ((ep) == 4) || \
108 ((ep) == 6) || ((ep) == 8))
109
110
111
112
/* non-zero when the pNAND bus runs in 16-bit mode
 * (set by cy_as_hal_gpmc_enable_16bit_bus, read by the cmd/addr helpers) */
static uint8_t pnand_16bit;
114
115
116
117
118#define PROCESS_MULTIPLE_DRQ_IN_ISR (1)
119
120
121
122
123
124#define CASDO 0x05
125#define CASDI 0x85
126#define RDPAGE_B1 0x00
127#define RDPAGE_B2 0x30
128#define PGMPAGE_B1 0x80
129#define PGMPAGE_B2 0x10
130
131
132
133
/* Direction of the DMA transfer currently owned by an endpoint. */
typedef enum cy_as_hal_dma_type {
	cy_as_hal_read,		/* device -> host memory */
	cy_as_hal_write,	/* host memory -> device */
	cy_as_hal_none		/* no transfer pending */
} cy_as_hal_dma_type;
139
140
141
142
143
144
145
146
147
/*
 * Per-endpoint DMA bookkeeping.  One entry per endpoint in end_points[];
 * written by the DMA setup calls and consumed by the DRQ service routines.
 */
typedef struct cy_as_hal_endpoint_dma {
	cy_bool buffer_valid;		/* a request buffer is attached */
	uint8_t *data_p;		/* current position in the buffer */
	uint32_t size;			/* total buffer size */
	bool sg_list_enabled;		/* buffer is a scatterlist, not flat */
	struct scatterlist *sg_p;	/* current scatterlist segment */
	uint16_t dma_xfer_sz;		/* size of the next DMA packet */
	uint32_t seg_xfer_cnt;		/* bytes done in current sg segment */
	uint16_t req_xfer_cnt;		/* bytes done for the whole request */
	uint16_t req_length;		/* total request length (non-sg mode) */
	cy_as_hal_dma_type type;	/* read / write / none */
	cy_bool pending;		/* a transfer is in flight */
} cy_as_hal_endpoint_dma;
173
174
175
176
/* Head of the list of HAL device objects (checked in stop_o_m_a_p_kernel). */
static cy_as_omap_dev_kernel *m_omap_list_p;

/* DMA-completion callback registered by the upper West Bridge layer. */
static cy_as_hal_dma_complete_callback callback;

/* Per-endpoint DMA state, indexed by endpoint number (0-15). */
static cy_as_hal_endpoint_dma end_points[16];

/* Forward declaration: DRQ (endpoint DMA request) dispatcher. */
static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p);

static uint16_t intr_sequence_num;	/* running interrupt count (traces) */
static uint8_t intr__enable;
spinlock_t int_lock;

static u32 iomux_vma;	/* VMA of the OMAP pad-config (IOMUX) block */
static u32 csa_phy;	/* physical base assigned to the Astoria chip select */

/* VMA of the GPMC register block (mapped in cy_as_hal_gpmc_init). */
static u32 gpmc_base;

/* VMAs of the GPMC data window and NAND data/command/address registers. */
static u32 gpmc_data_vma;
static u32 ndata_reg_vma;
static u32 ncmd_reg_vma;
static u32 naddr_reg_vma;

/* Forward declarations for the pNAND access primitives defined below. */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff);
static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff);
static inline u16 __attribute__((always_inline))
	ast_p_nand_casdo_read(u8 reg_addr8);
static inline void __attribute__((always_inline))
	ast_p_nand_casdi_write(u8 reg_addr8, u16 data);
223
224
225
226
227static void cy_as_hal_print_omap_regs(char *name_prefix,
228 u8 name_base, u32 virt_base, u16 count)
229{
230 u32 reg_val, reg_addr;
231 u16 i;
232 cy_as_hal_print_message(KERN_INFO "\n");
233 for (i = 0; i < count; i++) {
234
235 reg_addr = virt_base + (i*4);
236
237 reg_val = __raw_readl(reg_addr);
238 cy_as_hal_print_message(KERN_INFO "%s_%d[%8.8x]=%8.8x\n",
239 name_prefix, name_base+i,
240 reg_addr, reg_val);
241 }
242}
243
244
245
246
/*
 * Program one OMAP pad-configuration (IOMUX) register.
 * pad_func_index indexes omap_mux_pin_cfg[]; the entry supplies the
 * register offset and the mux value to write.  Returns the value written.
 * On first call only, sanity-checks that the table's END_OF_TABLE entry
 * starts with "END" to catch a truncated/corrupted table.
 */
static u16 omap_cfg_reg_L(u32 pad_func_index)
{
	/* run the table sanity check only once */
	static u8 sanity_check = 1;

	u32 reg_vma;
	u16 cur_val, wr_val, rdback_val;

	cy_as_hal_print_message(KERN_INFO" OMAP pins user_pad cfg ");
	if (sanity_check) {
		if ((omap_mux_pin_cfg[END_OF_TABLE].name[0] == 'E') &&
			(omap_mux_pin_cfg[END_OF_TABLE].name[1] == 'N') &&
			(omap_mux_pin_cfg[END_OF_TABLE].name[2] == 'D')) {

			cy_as_hal_print_message(KERN_INFO
					"table is good.\n");
		} else {
			cy_as_hal_print_message(KERN_WARNING
					"table is bad, fix it");
		}

		sanity_check = 0;
	}

	/* virtual address of this pad's config register */
	reg_vma = (u32)iomux_vma + omap_mux_pin_cfg[pad_func_index].offset;

	wr_val = omap_mux_pin_cfg[pad_func_index].mux_val;
	/* previous value; read kept for debugging only */
	cur_val = IORD16(reg_vma);

	IOWR16(reg_vma, wr_val);
	/* read-back; kept for debugging only */
	rdback_val = IORD16(reg_vma);

	return wr_val;
}
298
299#define BLKSZ_4K 0x1000
300
301
302
303
/*
 * Re-program the GPMC data-bus width for the Astoria chip select.
 * The CS is disabled (CSVALID cleared) around the CONFIG1 rewrite and
 * re-enabled afterwards; the new width is remembered in pnand_16bit for
 * the pNAND command/address helpers.
 */
void cy_as_hal_gpmc_enable_16bit_bus(bool dbus16_enabled)
{
	uint32_t tmp32;

	/* disable the chip select before touching CONFIG1 */
	tmp32 = gpmc_cs_read_reg(AST_GPMC_CS,
			GPMC_CS_CONFIG7) & ~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);

	if (dbus16_enabled) {
		DBGPRN("enabling 16 bit bus\n");
		gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
				(GPMC_CONFIG1_DEVICETYPE(2) |
				GPMC_CONFIG1_WAIT_PIN_SEL(2) |
				GPMC_CONFIG1_DEVICESIZE_16)
				);
	} else {
		DBGPRN(KERN_INFO "enabling 8 bit bus\n");
		gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
				(GPMC_CONFIG1_DEVICETYPE(2) |
				GPMC_CONFIG1_WAIT_PIN_SEL(2))
				);
	}

	/* re-enable the chip select */
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
			(tmp32 | GPMC_CONFIG7_CSVALID));

	/* remember the bus width for nand_cmd_n_addr() and friends */
	pnand_16bit = dbus16_enabled;
}
344
/*
 * Map and configure the OMAP GPMC for the Astoria pNAND interface:
 * ioremaps the GPMC register block, requests and maps the chip select's
 * data window, programs the bus timings, and forces posted writes.
 * Returns 0 on success, -1 or a negative errno on failure.
 */
static int cy_as_hal_gpmc_init(void)
{
	u32 tmp32;
	int err;
	struct gpmc_timings timings;

	/* NOTE(review): ioremap_nocache() result is not checked here */
	gpmc_base = (u32)ioremap_nocache(OMAP34XX_GPMC_BASE, BLKSZ_4K);
	DBGPRN(KERN_INFO "kernel has gpmc_base=%x , val@ the base=%x",
			gpmc_base, __raw_readl(gpmc_base)
			);

	/* virtual addresses of the GPMC NAND cmd/addr/data registers */
	ncmd_reg_vma = GPMC_VMA(AST_GPMC_NAND_CMD);
	naddr_reg_vma = GPMC_VMA(AST_GPMC_NAND_ADDR);
	ndata_reg_vma = GPMC_VMA(AST_GPMC_NAND_DATA);

	/* claim the chip select; csa_phy receives the assigned phys base */
	if (gpmc_cs_request(AST_GPMC_CS, SZ_16M, (void *)&csa_phy) < 0) {
		cy_as_hal_print_message(KERN_ERR "error failed to request"
				"ncs4 for ASTORIA\n");
		return -1;
	} else {
		DBGPRN(KERN_INFO "got phy_addr:%x for "
				"GPMC CS%d GPMC_CFGREG7[CS4]\n",
				csa_phy, AST_GPMC_CS);
	}

	/* reserve the CS data window so nothing else maps over it */
	if (!request_mem_region(csa_phy, BLKSZ_4K, "AST_OMAP_HAL")) {
		err = -EBUSY;
		cy_as_hal_print_message(KERN_ERR "error MEM region "
				"request for phy_addr:%x failed\n",
				csa_phy);
		goto out_free_cs;
	}

	/* map the data window used for the burst/prefetch reads */
	gpmc_data_vma = (u32)ioremap_nocache(csa_phy, BLKSZ_4K);
	if (!gpmc_data_vma) {
		err = -ENOMEM;
		cy_as_hal_print_message(KERN_ERR "error- ioremap()"
				"for phy_addr:%x failed", csa_phy);

		goto out_release_mem_region;
	}
	cy_as_hal_print_message(KERN_INFO "ioremap(%x) returned vma=%x\n",
			csa_phy, gpmc_data_vma);

	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
			(GPMC_CONFIG1_DEVICETYPE(2) |
			GPMC_CONFIG1_WAIT_PIN_SEL(2)));

	memset(&timings, 0, sizeof(timings));

	/* chip-select timing */
	timings.cs_on = WB_GPMC_CS_t_o_n;
	timings.cs_wr_off = WB_GPMC_BUSCYC_t;
	timings.cs_rd_off = WB_GPMC_BUSCYC_t;

	/* ADV signal timing */
	timings.adv_on = WB_GPMC_ADV_t_o_n;
	timings.adv_rd_off = WB_GPMC_BUSCYC_t;
	timings.adv_wr_off = WB_GPMC_BUSCYC_t;

	/* read-path (OE) timing */
	timings.oe_on = WB_GPMC_OE_t_o_n;
	timings.oe_off = WB_GPMC_OE_t_o_f_f;
	timings.access = WB_GPMC_RD_t_a_c_c;
	timings.rd_cycle = WB_GPMC_BUSCYC_t;

	/* write-path (WE) timing */
	timings.we_on = WB_GPMC_WE_t_o_n;
	timings.we_off = WB_GPMC_WE_t_o_f_f;
	timings.wr_access = WB_GPMC_WR_t_a_c_c;
	timings.wr_cycle = WB_GPMC_BUSCYC_t;

	timings.page_burst_access = WB_GPMC_BUSCYC_t;
	timings.wr_data_mux_bus = WB_GPMC_BUSCYC_t;
	gpmc_cs_set_timings(AST_GPMC_CS, &timings);

	cy_as_hal_print_omap_regs("GPMC_CONFIG", 1,
			GPMC_VMA(GPMC_CFG_REG(1, AST_GPMC_CS)), 7);

	/* disable the CS while re-programming its configuration */
	tmp32 = gpmc_cs_read_reg(AST_GPMC_CS, GPMC_CS_CONFIG7) &
			~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);

	/*
	 * NOTE(review): this mask/base-address value is overwritten again
	 * below by (tmp32 | CSVALID) — confirm the intended final CONFIG7
	 * contents.
	 */
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
			(AS_CS_MASK | AS_CS_BADDR));

	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
			(GPMC_CONFIG1_DEVICETYPE(2) |
			GPMC_CONFIG1_WAIT_PIN_SEL(2)));

	/* re-enable the chip select */
	gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
			(tmp32 | GPMC_CONFIG7_CSVALID));

	/* force posted writes for the NAND engine */
	tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
	tmp32 = tmp32 | NAND_FORCE_POSTED_WRITE_B | 0x40;
	IOWR32(GPMC_VMA(GPMC_CONFIG_REG), tmp32);

	tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
	cy_as_hal_print_message("GPMC_CONFIG_REG=0x%x\n", tmp32);

	return 0;

out_release_mem_region:
	release_mem_region(csa_phy, BLKSZ_4K);

out_free_cs:
	gpmc_cs_free(AST_GPMC_CS);

	return err;
}
488
489
490
491
/*
 * Astoria interrupt handler.  Reads the P0 interrupt cause register,
 * masks all West Bridge interrupts for the duration of servicing,
 * handles DRQ (endpoint DMA) causes locally — bounded by
 * MAX_DRQ_LOOPS_IN_ISR passes — and delegates mailbox/PM/PLL/MCU
 * causes to cy_as_intr_service_interrupt().  Always returns IRQ_HANDLED.
 */
static irqreturn_t cy_astoria_int_handler(int irq,
			void *dev_id, struct pt_regs *regs)
{
	cy_as_omap_dev_kernel *dev_p;
	uint16_t read_val = 0;
	uint16_t mask_val = 0;

	/* number of DRQ passes handled during this invocation */
	uint16_t drq_loop_cnt = 0;
	uint8_t irq_pin;

	/* causes delegated to the West Bridge API layer */
	const uint16_t sentinel = (CY_AS_MEM_P0_INTR_REG_MCUINT |
			CY_AS_MEM_P0_INTR_REG_MBINT |
			CY_AS_MEM_P0_INTR_REG_PMINT |
			CY_AS_MEM_P0_INTR_REG_PLLLOCKINT);

	/* sample the INT# pin level for the debug trace below */
	irq_pin = __gpio_get_value(AST_INT);

	/* running interrupt counter, used in traces */
	intr_sequence_num++;

	dev_p = dev_id;

	/* pending interrupt causes */
	read_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_INTR_REG);

	/* current interrupt mask, restored before returning */
	mask_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_INT_MASK_REG);

	DBGPRN("<1>HAL__intr__enter:_seq:%d, P0_INTR_REG:%x\n",
			intr_sequence_num, read_val);

	/* mask everything while servicing */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_INT_MASK_REG, 0x0000);

	if (read_val & CY_AS_MEM_P0_INTR_REG_DRQINT) {

		do {
			drq_loop_cnt++;

			cy_handle_d_r_q_interrupt(dev_p);

			/* bound the time spent inside the ISR */
			if (drq_loop_cnt >= MAX_DRQ_LOOPS_IN_ISR)
				break;

		/* keep servicing while DRQ remains asserted */
		} while (cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INTR_REG) &
					CY_AS_MEM_P0_INTR_REG_DRQINT);
	}

	if (read_val & sentinel)
		cy_as_intr_service_interrupt((cy_as_hal_device_tag)dev_p);

	DBGPRN("<1>_hal:_intr__exit seq:%d, mask=%4.4x,"
			"int_pin:%d DRQ_jobs:%d\n",
			intr_sequence_num,
			mask_val,
			irq_pin,
			drq_loop_cnt);

	/* restore the saved interrupt mask */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_INT_MASK_REG, mask_val);

	return IRQ_HANDLED;
}
594
/*
 * Attach the Astoria INT# GPIO to our ISR (shared, level-low trigger).
 * Returns 0 on success or the request_irq() error code; on failure the
 * GPIO is released so a later attempt can re-acquire it.
 */
static int cy_as_hal_configure_interrupts(void *dev_p)
{
	int result;
	int irq_pin = AST_INT;

	irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW);

	result = request_irq(OMAP_GPIO_IRQ(irq_pin),
			(irq_handler_t)cy_astoria_int_handler,
			IRQF_SHARED, "AST_INT#", dev_p);

	if (result == 0) {
		cy_as_hal_print_message(KERN_INFO"AST_INT omap_pin:"
				"%d assigned IRQ #%d IRQEN1=%d\n",
				irq_pin,
				OMAP_GPIO_IRQ(irq_pin),
				OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1)
				);
	} else {
		cy_as_hal_print_message("cyasomaphal: interrupt "
				"failed to register\n");
		/* give the pin back so it can be re-acquired */
		gpio_free(irq_pin);
		cy_as_hal_print_message(KERN_WARNING
				"ASTORIA: can't get assigned IRQ"
				"%i for INT#\n", OMAP_GPIO_IRQ(irq_pin));
	}

	return result;
}
634
635
636
637
/*
 * Acquire and configure every GPIO in the NULL-name-terminated pad
 * table: request the pin, program its pad mux, set its output level and
 * direction, then log the resulting pad/GPIO state.  Pins already taken
 * elsewhere are reported and skipped (their 'valid' flag stays clear).
 */
static void cy_as_hal_init_user_pads(user_pad_cfg_t *pad_cfg_tab)
{
	u32 in_level = 0;
	u16 tmp16, mux_val;

	while (pad_cfg_tab->name != NULL) {

		if (gpio_request(pad_cfg_tab->pin_num, NULL) == 0) {

			pad_cfg_tab->valid = 1;
			mux_val = omap_cfg_reg_L(pad_cfg_tab->mux_func);

			/* set the output level before the direction */
			__gpio_set_value(pad_cfg_tab->pin_num,
					pad_cfg_tab->drv);

			/* dir != 0 means input */
			if (pad_cfg_tab->dir)
				gpio_direction_input(pad_cfg_tab->pin_num);
			else
				gpio_direction_output(pad_cfg_tab->pin_num,
						pad_cfg_tab->drv);

			/* read the level back for the trace below */
			in_level = __gpio_get_value(pad_cfg_tab->pin_num);

			cy_as_hal_print_message(KERN_INFO "configured %s to "
					"OMAP pad_%d, DIR=%d "
					"DOUT=%d, DIN=%d\n",
					pad_cfg_tab->name,
					pad_cfg_tab->pin_num,
					pad_cfg_tab->dir,
					pad_cfg_tab->drv,
					in_level
					);
		} else {
			cy_as_hal_print_message(KERN_INFO "couldn't cfg pin %d"
					"for signal %s, its already taken\n",
					pad_cfg_tab->pin_num,
					pad_cfg_tab->name);
		}

		/* dump the pad-config register and GPIO bits for debugging */
		tmp16 = *(u16 *)PADCFG_VMA
				(omap_mux_pin_cfg[pad_cfg_tab->mux_func].offset);

		cy_as_hal_print_message(KERN_INFO "GPIO_%d(PAD_CFG=%x,OE=%d"
				"DOUT=%d, DIN=%d IRQEN=%d)\n\n",
				pad_cfg_tab->pin_num, tmp16,
				OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_OE),
				OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_OUT),
				OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_IN),
				OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_IRQENABLE1)
				);

		pad_cfg_tab++;
	}

	cy_as_hal_print_message(KERN_INFO"pads configured\n");
}
711
712
713
714
715
716static void cy_as_hal_release_user_pads(user_pad_cfg_t *pad_cfg_tab)
717{
718 while (pad_cfg_tab->name != NULL) {
719
720 if (pad_cfg_tab->valid) {
721 gpio_free(pad_cfg_tab->pin_num);
722 pad_cfg_tab->valid = 0;
723 cy_as_hal_print_message(KERN_INFO "GPIO_%d "
724 "released from %s\n",
725 pad_cfg_tab->pin_num,
726 pad_cfg_tab->name);
727 } else {
728 cy_as_hal_print_message(KERN_INFO "no release "
729 "for %s, GPIO_%d, wasn't acquired\n",
730 pad_cfg_tab->name,
731 pad_cfg_tab->pin_num);
732 }
733 pad_cfg_tab++;
734 }
735}
736
/*
 * Route the OMAP GPMC nCS4 pad (ball T8) to the Astoria chip select.
 */
void cy_as_hal_config_c_s_mux(void)
{
	omap_cfg_reg_L(T8_OMAP3430_GPMC_n_c_s4);
}
EXPORT_SYMBOL(cy_as_hal_config_c_s_mux);
745
746
747
748
/*
 * One-time OMAP3430 hardware bring-up for the Astoria HAL: maps the
 * pad-config (IOMUX) and GPIO register banks, configures the user pads,
 * initializes the GPMC, and routes the chip-select mux.
 * Returns the VMA of the GPMC data window (0 if that mapping failed).
 */
uint32_t cy_as_hal_processor_hw_init(void)
{
	int i, err;

	cy_as_hal_print_message(KERN_INFO "init OMAP3430 hw...\n");

	/* map the pad-configuration block
	 * NOTE(review): ioremap_nocache() result is not checked */
	iomux_vma = (u32)ioremap_nocache(
			(u32)CTLPADCONF_BASE_ADDR, CTLPADCONF_SIZE);
	cy_as_hal_print_message(KERN_INFO "PADCONF_VMA=%x val=%x\n",
			iomux_vma, IORD32(iomux_vma));

	/* map all six OMAP GPIO banks */
	for (i = 0; i < 6; i++) {
		gpio_vma_tab[i].virt_addr = (u32)ioremap_nocache(
				gpio_vma_tab[i].phy_addr,
				gpio_vma_tab[i].size);

		cy_as_hal_print_message(KERN_INFO "%s virt_addr=%x\n",
				gpio_vma_tab[i].name,
				(u32)gpio_vma_tab[i].virt_addr);
	};

	/* release pins possibly still held by an earlier instance */
	gpio_free(AST_RESET);
	gpio_free(AST_CS);

	cy_as_hal_init_user_pads(user_pad_cfg);

	err = cy_as_hal_gpmc_init();
	if (err < 0)
		cy_as_hal_print_message(KERN_INFO"gpmc init failed:%d", err);

	cy_as_hal_config_c_s_mux();

	return gpmc_data_vma;
}
EXPORT_SYMBOL(cy_as_hal_processor_hw_init);
798
/*
 * Undo cy_as_hal_processor_hw_init()/cy_as_hal_gpmc_init(): unmap the
 * GPMC data window, release its memory region and chip select, detach
 * the INT# ISR, and free the user pads.
 */
void cy_as_hal_omap_hardware_deinit(cy_as_omap_dev_kernel *dev_p)
{
	if (gpmc_data_vma != 0)
		iounmap((void *)gpmc_data_vma);

	if (csa_phy != 0)
		release_mem_region(csa_phy, BLKSZ_4K);

	gpmc_cs_free(AST_GPMC_CS);

	free_irq(OMAP_GPIO_IRQ(AST_INT), dev_p);

	cy_as_hal_release_user_pads(user_pad_cfg);
}
816
817
818
819
820
821
822
823
824
/*
 * Stop the OMAP HAL layer: validate the HAL tag, mask all West Bridge
 * interrupts, de-initialize the hardware, unlink the device object from
 * the HAL list and free it.
 * Returns 0 on success, 1 on a NULL or invalid tag.
 */
int stop_o_m_a_p_kernel(const char *pgm, cy_as_hal_device_tag tag)
{
	cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;

	if (0 == dev_p)
		return 1;

	cy_as_hal_print_message("<1>_stopping OMAP34xx HAL layer object\n");
	if (dev_p->m_sig != CY_AS_OMAP_KERNEL_HAL_SIG) {
		cy_as_hal_print_message("<1>%s: %s: bad HAL tag\n",
				pgm, __func__);
		return 1;
	}

	/* mask astoria interrupts before tearing the hardware down */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_INT_MASK_REG, 0x0000);

#if 0
	if (dev_p->thread_flag == 0) {
		dev_p->thread_flag = 1;
		wait_for_completion(&dev_p->thread_complete);
		cy_as_hal_print_message("cyasomaphal:"
			"done cleaning thread\n");
		cy_as_hal_destroy_sleep_channel(&dev_p->thread_sc);
	}
#endif

	cy_as_hal_omap_hardware_deinit(dev_p);

	/* unlink from the HAL device list
	 * NOTE(review): only handles dev_p at the list head */
	if (m_omap_list_p == dev_p)
		m_omap_list_p = dev_p->m_next_p;

	cy_as_hal_free(dev_p);

	cy_as_hal_print_message(KERN_INFO"OMAP_kernel_hal stopped\n");
	return 0;
}
871
/*
 * Install the INT# ISR and unmask the DRQ and mailbox interrupt sources.
 * NOTE(review): the result of cy_as_hal_configure_interrupts() is stored
 * in 'ret' but never acted upon, and this function always returns 1 —
 * confirm whether callers depend on that.
 */
int omap_start_intr(cy_as_hal_device_tag tag)
{
	cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
	int ret = 0;
	const uint16_t mask = CY_AS_MEM_P0_INTR_REG_DRQINT |
			CY_AS_MEM_P0_INTR_REG_MBINT;

	/* attach the GPIO IRQ */
	ret = cy_as_hal_configure_interrupts(dev_p);

	/* unmask the DRQ and mailbox interrupt sources */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_INT_MASK_REG, mask);

	return 1;
}
892
893
894
895
896
897
898
899
900
901
/*
 * Issue a pNAND command byte followed by the 5-byte column/row address,
 * honoring the current bus width (pnand_16bit).
 */
static inline void nand_cmd_n_addr(u8 cmdb1, u16 col_addr, u32 row_addr)
{
	/* CA1|CA0 in the low 16 bits, RA1|RA0 in the high 16 bits */
	u32 tmpa32 = ((row_addr << 16) | col_addr);
	/* 5th (most significant) address byte */
	u8 RA2 = (u8)(row_addr >> 16);

	if (!pnand_16bit) {
		/* 8-bit bus: command, then 4 address bytes in one 32-bit
		 * write, then the 5th address byte */
		IOWR8(ncmd_reg_vma, cmdb1);

		IOWR32(naddr_reg_vma, tmpa32);

		IOWR8(naddr_reg_vma, RA2);

	} else {
		/* 16-bit bus: address bytes are issued one at a time */
		uint8_t CA0, CA1, RA0, RA1;
		CA0 = tmpa32 & 0x000000ff;
		CA1 = (tmpa32 >> 8) & 0x000000ff;
		RA0 = (tmpa32 >> 16) & 0x000000ff;
		RA1 = (tmpa32 >> 24) & 0x000000ff;

		IOWR8(ncmd_reg_vma, cmdb1);
		IOWR8(naddr_reg_vma, CA0);
		IOWR8(naddr_reg_vma, CA1);
		IOWR8(naddr_reg_vma, RA0);
		IOWR8(naddr_reg_vma, RA1);
		IOWR8(naddr_reg_vma, RA2);
	}
}
956
957
958
959
/*
 * Wait for the pNAND ready/busy (R/nB) signal.
 * The polling body has been removed/compiled out, so this currently
 * returns immediately with a spin count of 0.
 */
inline int wait_rn_b_high(void)
{
	u32 w_spins = 0;

	return w_spins;
}
979
980#ifdef ENABLE_GPMC_PF_ENGINE
981
982
983
984
/*
 * Large-block pNAND page read via the GPMC prefetch engine (PFE):
 * programs the PFE for 'count' bytes, issues READ PAGE, then drains the
 * PFE FIFO into 'buff' in 32-bit words, mopping up the 0-4 trailing
 * bytes once the engine's COUNTVAL reaches zero.
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
	uint16_t w32cnt;
	uint32_t *ptr32;
	uint8_t *ptr8;
	uint8_t bytes_in_fifo;

#ifdef PFE_READ_DEBUG
	uint32_t loop_limit;
	uint16_t bytes_read = 0;
#endif

	uint32_t tmp32;
	uint32_t pfe_status;

	/* stop the engine, then program transfer mode and byte count */
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x0);
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);

#ifdef PFE_READ_DEBUG
	/* verify the config registers accepted the programmed values */
	tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG1));
	if (tmp32 != GPMC_PREFETCH_CONFIG1_VAL) {
		printk(KERN_INFO "<1> prefetch is CONFIG1 read val:%8.8x, != VAL written:%8.8x\n",
				tmp32, GPMC_PREFETCH_CONFIG1_VAL);
		tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
		printk(KERN_INFO "<1> GPMC_PREFETCH_STATUS : %8.8x\n", tmp32);
	}

	tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG2));
	if (tmp32 != (count))
		printk(KERN_INFO "<1> GPMC_PREFETCH_CONFIG2 read val:%d, "
				"!= VAL written:%d\n", tmp32, count);
#endif

	/* issue READ PAGE: cmd1 + address, then cmd2 */
	nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);

	IOWR8(ncmd_reg_vma, RDPAGE_B2);

	/* start the prefetch engine */
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);

	ptr32 = buff;

	while (1) {
		/* busy-wait for data to appear in the PFE FIFO */
		do {
			pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
			bytes_in_fifo = (pfe_status >> 24) & 0x7f;
		} while (bytes_in_fifo == 0);

		/* number of whole 32-bit words available */
		w32cnt = bytes_in_fifo >> 2;

#if 0
		printk(KERN_ERR "<1> got PF_STATUS:%8.8x FIFO_PTR:%d, COUNTVAL:%d, w32cnt:%d\n",
				pfe_status, bytes_in_fifo,
				(pfe_status & 0x3fff), w32cnt);
#endif

		while (w32cnt--)
			*ptr32++ = IORD32(gpmc_data_vma);

		/* COUNTVAL == 0: engine done, drain the 0-4 leftovers */
		if ((pfe_status & 0x3fff) == 0) {

			bytes_in_fifo = (IORD32(
					GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;

			/* NOTE(review): implicit u32* -> u8* conversion */
			ptr8 = ptr32;
			switch (bytes_in_fifo) {

			case 0:
				/* nothing left in the FIFO */
				break;
			case 1:
				*ptr8 = IORD8(gpmc_data_vma);
				break;

			case 2:
				*(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
				break;

			case 3:
				/* 16-bit read plus a final byte read */
				*(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
				ptr8 += 2;
				*ptr8 = IORD8(gpmc_data_vma);
				break;

			case 4:
				*ptr32 = IORD32(gpmc_data_vma);
				break;

			default:
				printk(KERN_ERR"<1>_error: PFE FIFO bytes leftover is not read:%d\n",
						bytes_in_fifo);
				break;
			}

			/* transfer complete */
			break;
		}
	}
}
1142#endif
1143
1144#ifdef PFE_LBD_READ_V2
1145
1146
1147
1148static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
1149{
1150 uint8_t rd_cnt;
1151 uint32_t *ptr32;
1152 uint8_t *ptr8;
1153 uint16_t reminder;
1154 uint32_t pfe_status;
1155
1156
1157
1158
1159
1160 nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
1161 IOWR8(ncmd_reg_vma, RDPAGE_B2);
1162
1163
1164
1165
1166
1167
1168 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
1169 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
1170 IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);
1171
1172 ptr32 = buff;
1173
1174 do {
1175 pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
1176 rd_cnt = pfe_status >> (24+2);
1177
1178 while (rd_cnt--)
1179 *ptr32++ = IORD32(gpmc_data_vma);
1180
1181 } while (pfe_status & 0x3fff);
1182
1183
1184
1185
1186 ptr8 = ptr32;
1187 rd_cnt = (IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;
1188
1189 while (rd_cnt--)
1190 *ptr8++ = IORD8(gpmc_data_vma);
1191}
1192#endif
1193
1194#ifdef PNAND_LBD_READ_NO_PFE
1195
1196
1197
1198
1199
/*
 * Large-block pNAND page read WITHOUT the prefetch engine (the variant
 * selected by PNAND_LBD_READ_NO_PFE above): issues READ PAGE, then pulls
 * 'count' bytes straight from the NAND data register — 32 bits at a
 * time with a 16-bit tail for the 0-3 byte remainder.
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
	uint16_t w32cnt;
	uint32_t *ptr32;
	uint16_t *ptr16;
	uint16_t remainder;

	DBGPRN("<1> %s(): NO_PFE\n", __func__);

	ptr32 = buff;

	/* number of whole 32-bit words */
	w32cnt = count >> 2;

	/* 0-3 trailing bytes */
	remainder = count & 03;

	/* issue READ PAGE: cmd1 + address, then cmd2 */
	nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
	IOWR8(ncmd_reg_vma, RDPAGE_B2);

	while (w32cnt--)
		*ptr32++ = IORD32(ndata_reg_vma);

	ptr16 = (uint16_t *)ptr32;

	switch (remainder) {
	case 1:
		/* fallthrough: one leftover byte is fetched with a
		 * 16-bit read, same as two */
	case 2:
		*ptr16 = IORD16(ndata_reg_vma);
		break;
	case 3:
		/* two 16-bit reads cover 3 trailing bytes */
		*ptr16++ = IORD16(ndata_reg_vma);
		*ptr16 = IORD16(ndata_reg_vma);
		break;
	default:
		/* remainder == 0: nothing left to read */
		break;
	}
}
1257#endif
1258
1259
1260
1261
1262
1263
/*
 * Large-block pNAND page write: issues PROGRAM PAGE cmd1 + address,
 * streams 'count' bytes to the NAND data register (32 bits at a time,
 * 16-bit tail for the 0-3 byte remainder), then issues cmd2 to commit.
 */
static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
	uint16_t w32cnt;
	uint16_t remainder;
	uint8_t *ptr8;
	uint16_t *ptr16;
	uint32_t *ptr32;

	remainder = count & 03;	/* 0-3 trailing bytes */
	w32cnt = count >> 2;	/* whole 32-bit words */
	ptr32 = buff;
	ptr8 = buff;

	/* issue PROGRAM PAGE cmd1 + address */
	nand_cmd_n_addr(PGMPAGE_B1, col_addr, row_addr);

	while (w32cnt--)
		IOWR32(ndata_reg_vma, *ptr32++);

	ptr16 = (uint16_t *)ptr32;

	switch (remainder) {
	case 1:
		/* fallthrough: one leftover byte is sent with a
		 * 16-bit write, same as two */
	case 2:
		IOWR16(ndata_reg_vma, *ptr16);
		break;

	case 3:
		/* two 16-bit writes cover 3 trailing bytes */
		IOWR16(ndata_reg_vma, *ptr16++);
		IOWR16(ndata_reg_vma, *ptr16);
		break;
	default:
		/* remainder == 0: nothing left to write */
		break;
	}

	/* cmd2 commits the page */
	IOWR8(ncmd_reg_vma, PGMPAGE_B2);
}
1321
1322
1323
1324
/*
 * Write one 16-bit West Bridge register over the pNAND bus using the
 * CASDI (0x85) command sequence.  Runs with local IRQs disabled so the
 * cmd/addr/data sequence is not interleaved; the atomic usage counter
 * only detects and logs unexpected re-entry, it does not prevent it.
 */
static inline void ast_p_nand_casdi_write(u8 reg_addr8, u16 data)
{
	unsigned long flags;
	u16 addr16;

	/* re-entrancy detector (diagnostic only) */
	static atomic_t rdreg_usage_cnt = { 0 };

	/* keep the cmd/addr/data sequence atomic on this CPU */
	local_irq_save(flags);

	if (atomic_read(&rdreg_usage_cnt) != 0) {
		cy_as_hal_print_message(KERN_ERR "cy_as_omap_hal:"
				"* cy_as_hal_write_register usage:%d\n",
				atomic_read(&rdreg_usage_cnt));
	}

	atomic_inc(&rdreg_usage_cnt);

	if (pnand_16bit) {
		/* 16-bit bus: address issued as two separate bytes */
		IOWR8(ncmd_reg_vma, 0x85);
		IOWR8(naddr_reg_vma, reg_addr8);
		IOWR8(naddr_reg_vma, 0x0c);

		IOWR16(ndata_reg_vma, data);
	} else {
		/* 8-bit bus: register address in the low byte,
		 * 0x0c page select in the high byte */
		addr16 = 0x0c00 | reg_addr8;

		IOWR8(ncmd_reg_vma, 0x85);
		IOWR16(naddr_reg_vma, addr16);
		IOWR16(ndata_reg_vma, data);
	}

	atomic_dec(&rdreg_usage_cnt);
	local_irq_restore(flags);
}
1388
1389
1390
1391
1392
1393static inline u16 ast_p_nand_casdo_read(u8 reg_addr8)
1394{
1395 u16 data;
1396 u16 addr16;
1397 unsigned long flags;
1398
1399
1400
1401 static atomic_t wrreg_usage_cnt = { 0 };
1402
1403
1404
1405
1406 local_irq_save(flags);
1407
1408 if (atomic_read(&wrreg_usage_cnt) != 0) {
1409
1410
1411
1412
1413 cy_as_hal_print_message(KERN_ERR"cy_as_omap_hal: "
1414 "cy_as_hal_write_register usage:%d\n",
1415 atomic_read(&wrreg_usage_cnt));
1416 }
1417 atomic_inc(&wrreg_usage_cnt);
1418
1419
1420
1421
1422 if (pnand_16bit) {
1423
1424
1425
1426
1427
1428 IOWR8(ncmd_reg_vma, 0x05);
1429 IOWR8(naddr_reg_vma, reg_addr8);
1430 IOWR8(naddr_reg_vma, 0x0c);
1431 IOWR8(ncmd_reg_vma, 0x00E0);
1432
1433 udelay(1);
1434
1435
1436
1437
1438 data = IORD16(ndata_reg_vma);
1439 } else {
1440
1441
1442
1443
1444 addr16 = 0x0c00 | reg_addr8;
1445 IOWR8(ncmd_reg_vma, 0x05);
1446 IOWR16(naddr_reg_vma, addr16);
1447 IOWR8(ncmd_reg_vma, 0xE0);
1448 udelay(1);
1449 data = IORD16(ndata_reg_vma);
1450 }
1451
1452
1453
1454
1455 atomic_dec(&wrreg_usage_cnt);
1456 local_irq_restore(flags);
1457
1458 return data;
1459}
1460
1461
1462
1463
1464
1465
1466
/*
 * HAL API: write a 16-bit West Bridge register.  The device tag is
 * unused here; all access goes through the single mapped pNAND bus.
 */
void cy_as_hal_write_register(
			cy_as_hal_device_tag tag,
			uint16_t addr, uint16_t data)
{
	ast_p_nand_casdi_write((u8)addr, data);
}
1473
1474
1475
1476
1477
1478
1479uint16_t cy_as_hal_read_register(cy_as_hal_device_tag tag, uint16_t addr)
1480{
1481 uint16_t data = 0;
1482
1483
1484
1485
1486 data = ast_p_nand_casdo_read((u8)addr);
1487
1488 return data;
1489}
1490
1491
1492
1493
1494
1495
1496
/*
 * Decide whether endpoint 'ep' has more data to transfer and, if so,
 * set end_points[ep].dma_xfer_sz for the next packet.
 * Flat-buffer mode: compares req_xfer_cnt against req_length and clamps
 * the next packet to HAL_DMA_PKT_SZ.
 * Scatterlist mode: advances to the next sg segment once the current
 * one is consumed; the next packet is always HAL_DMA_PKT_SZ.
 * Returns true when another packet should be started, false when the
 * request is complete.
 */
static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
{
	if (!end_points[ep].sg_list_enabled) {
		/* flat-buffer mode: done when the whole request is moved */
		if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
			DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
					__func__, end_points[ep].req_length, ep);

			return false;
		}

		/* clamp the next packet to HAL_DMA_PKT_SZ */
		if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
				>= HAL_DMA_PKT_SZ) {
			end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
		} else {
			end_points[ep].dma_xfer_sz = end_points[ep].req_length -
					end_points[ep].req_xfer_cnt;
		}

		return true;
	}

	/* sg mode: advance to the next segment once this one is consumed */
	if (end_points[ep].seg_xfer_cnt == end_points[ep].sg_p->length) {

		if (sg_is_last(end_points[ep].sg_p)) {
			DBGPRN("<1> %s: EP:%d completed,"
					"%d bytes xfered\n",
					__func__,
					ep,
					end_points[ep].req_xfer_cnt
					);

			return false;
		} else {
			end_points[ep].seg_xfer_cnt = 0;
			end_points[ep].sg_p = sg_next(end_points[ep].sg_p);

			/* virtual address of the new segment */
			end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
			DBGPRN("<1> %s new SG:_va:%p\n\n",
					__func__, end_points[ep].data_p);
		}

	}

	/* NOTE(review): sg mode always programs a full HAL_DMA_PKT_SZ
	 * packet — appears to assume segment lengths are multiples of
	 * the packet size; confirm against the sg-list producer */
	end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;

	return true;
}
1580
1581
1582
1583
/*
 * Service a DRQ on a read endpoint: read the byte count the device
 * advertises in the EP's DMA register, pull that many bytes from the
 * endpoint's pNAND address into the current buffer position, then
 * either arm the next packet or complete the request through the
 * registered completion callback.
 */
static void cy_service_e_p_dma_read_request(
			cy_as_omap_dev_kernel *dev_p, uint8_t ep)
{
	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
	uint16_t v, size;
	void *dptr;
	uint16_t col_addr = 0x0000;
	uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
	/* EP2..EP15 DMA registers are consecutive, starting at EP2's */
	uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;

	/* how many bytes the device has ready for this EP */
	v = cy_as_hal_read_register(tag, ep_dma_reg);

	size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;

	dptr = end_points[ep].data_p;

	DBGPRN("<1>HAL:_svc_dma_read on EP_%d sz:%d, intr_seq:%d, dptr:%p\n",
			ep,
			size,
			intr_sequence_num,
			dptr
			);

	cy_as_hal_assert(size != 0);

	if (size) {
		/* pull the packet over the pNAND bus */
		p_nand_lbd_read(col_addr, row_addr, size, dptr);
	}

	/* acknowledge by clearing the EP DMA register */
	cy_as_hal_write_register(tag, ep_dma_reg, 0);

	end_points[ep].seg_xfer_cnt += size;
	end_points[ep].req_xfer_cnt += size;

	/* advance the buffer position */
	end_points[ep].data_p += size;

	if (prep_for_next_xfer(tag, ep)) {
		/* arm the next packet */
		v = end_points[ep].dma_xfer_sz |
				CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
		cy_as_hal_write_register(tag, ep_dma_reg, v);
	} else {
		/* request complete: release the endpoint and notify */
		end_points[ep].pending = cy_false;
		end_points[ep].type = cy_as_hal_none;
		end_points[ep].buffer_valid = cy_false;

		if (callback) {
			DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
					end_points[ep].req_xfer_cnt);
			callback(tag, ep,
					end_points[ep].req_xfer_cnt,
					CY_AS_ERROR_SUCCESS);
		}
	}
}
1665
1666
1667
1668
/*
 * Service a DRQ on a write endpoint: push the current packet
 * (end_points[ep].dma_xfer_sz bytes) to the endpoint's pNAND address,
 * report the size through the EP DMA register, then either arm the next
 * packet or complete the request through the registered callback.
 */
static void cy_service_e_p_dma_write_request(
			cy_as_omap_dev_kernel *dev_p, uint8_t ep)
{
	uint16_t addr;
	uint16_t v = 0;
	uint32_t size;
	uint16_t col_addr = 0x0000;
	uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
	void *dptr;

	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;

	/* packet size decided by prep_for_next_xfer()/DMA setup */
	size = end_points[ep].dma_xfer_sz;
	dptr = end_points[ep].data_p;

	if (size)
		p_nand_lbd_write(col_addr, row_addr, size, dptr);

	end_points[ep].seg_xfer_cnt += size;
	end_points[ep].req_xfer_cnt += size;

	/* advance the buffer position */
	end_points[ep].data_p += size;

	/* report the number of bytes written for this EP
	 * (EP2..EP15 DMA registers are consecutive) */
	addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
	cy_as_hal_write_register(tag, addr, size);

	if (prep_for_next_xfer(tag, ep)) {
		/* arm the next packet */
		v = end_points[ep].dma_xfer_sz |
				CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
		cy_as_hal_write_register(tag, addr, v);
	} else {
		/* request complete: release the endpoint and notify */
		end_points[ep].pending = cy_false;
		end_points[ep].type = cy_as_hal_none;
		end_points[ep].buffer_valid = cy_false;

		if (callback) {
			callback(tag, ep,
					end_points[ep].req_xfer_cnt,
					CY_AS_ERROR_SUCCESS);
		}
	}
}
1743
1744
1745
1746
/*
 * DRQ dispatcher: read the DRQ bitmask and service ONE endpoint per
 * call, round-robin over EPs 2-15 (the static service_ep persists the
 * scan position across calls).
 * NOTE(review): if CY_AS_MEM_P0_DRQ ever reports a non-zero value with
 * no bit set in positions 2-15, the scan loop below would not
 * terminate — relies on the register covering EPs 2-15 only; confirm.
 */
static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p)
{
	uint16_t v;
	static uint8_t service_ep = 2;

	/* pending-DRQ bitmask, one bit per endpoint */
	v = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
			CY_AS_MEM_P0_DRQ);

	if (v == 0) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("stray DRQ interrupt detected\n");
#endif
		return;
	}

	/* scan round-robin for the next EP with DRQ asserted */
	while ((v & (1 << service_ep)) == 0) {

		if (service_ep == 15)
			service_ep = 2;
		else
			service_ep++;
	}

	if (end_points[service_ep].type == cy_as_hal_write) {
		/* pending write job on this endpoint */
		cy_service_e_p_dma_write_request(dev_p, service_ep);
	} else if (end_points[service_ep].type == cy_as_hal_read) {
		/* pending read job on this endpoint */
		cy_service_e_p_dma_read_request(dev_p, service_ep);
	}
#ifndef WESTBRIDGE_NDEBUG
	else
		cy_as_hal_print_message("cyashalomap:interrupt,"
					" w/o pending DMA job,"
					"-check DRQ_MASK logic\n");
#endif

	/* if the request finished, start the next scan at the next EP
	 * so one endpoint cannot starve the others */
	if (end_points[service_ep].type == cy_as_hal_none) {
		if (service_ep == 15)
			service_ep = 2;
		else
			service_ep++;
	}

}
1810
1811void cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep)
1812{
1813 DBGPRN("cy_as_hal_dma_cancel_request on ep:%d", ep);
1814 if (end_points[ep].pending)
1815 cy_as_hal_write_register(tag,
1816 CY_AS_MEM_P0_EP2_DMA_REG + ep - 2, 0);
1817
1818 end_points[ep].buffer_valid = cy_false;
1819 end_points[ep].type = cy_as_hal_none;
1820}
1821
1822
1823
1824
1825
1826
1827void cy_as_hal_set_ep_dma_mode(uint8_t ep, bool sg_xfer_enabled)
1828{
1829 end_points[ep].sg_list_enabled = sg_xfer_enabled;
1830 DBGPRN("<1> EP:%d sg_list assisted DMA mode set to = %d\n",
1831 ep, end_points[ep].sg_list_enabled);
1832}
1833EXPORT_SYMBOL(cy_as_hal_set_ep_dma_mode);
1834
1835
1836
1837
1838
1839
1840
1841void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
1842 uint8_t ep, void *buf,
1843 uint32_t size, uint16_t maxsize)
1844{
1845 uint32_t addr = 0;
1846 uint16_t v = 0;
1847
1848
1849
1850
1851
1852
1853 cy_as_hal_assert(ep != 0 && ep != 1);
1854
1855
1856
1857
1858
1859 cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
1860 end_points[ep].buffer_valid = cy_true;
1861 end_points[ep].type = cy_as_hal_write;
1862 end_points[ep].pending = cy_true;
1863
1864
1865
1866
1867 end_points[ep].req_length = size;
1868
1869 if (size >= maxsize) {
1870
1871
1872
1873
1874 end_points[ep].dma_xfer_sz = maxsize;
1875 } else {
1876
1877
1878
1879 end_points[ep].dma_xfer_sz = size;
1880 }
1881
1882
1883
1884
1885
1886
1887
1888 if (end_points[ep].sg_list_enabled) {
1889
1890
1891
1892
1893
1894
1895
1896
1897 end_points[ep].sg_p = buf;
1898 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
1899 end_points[ep].seg_xfer_cnt = 0;
1900 end_points[ep].req_xfer_cnt = 0;
1901
1902#ifdef DBGPRN_DMA_SETUP_WR
1903 DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p,"
1904 "req_sz:%d, maxsz:%d\n",
1905 __func__,
1906 ep,
1907 buf,
1908 end_points[ep].data_p,
1909 size,
1910 maxsize);
1911#endif
1912
1913 } else {
1914
1915
1916
1917
1918 #ifdef DBGPRN_DMA_SETUP_WR
1919 DBGPRN("<1>%s non storage or sz < 512:"
1920 "EP:%d, sz:%d\n", __func__, ep, size);
1921 #endif
1922
1923 end_points[ep].sg_p = NULL;
1924
1925
1926
1927
1928 end_points[ep].data_p = buf;
1929
1930
1931
1932
1933 end_points[ep].req_xfer_cnt = 0;
1934 }
1935
1936
1937
1938
1939 v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
1940 | CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1941
1942 addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1943
1944 cy_as_hal_write_register(tag, addr, v);
1945}
1946
1947
1948
1949
1950
1951
1952
1953void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
1954 uint8_t ep, void *buf,
1955 uint32_t size, uint16_t maxsize)
1956{
1957 uint32_t addr;
1958 uint16_t v;
1959
1960
1961
1962
1963
1964
1965 cy_as_hal_assert(ep != 0 && ep != 1);
1966
1967
1968
1969
1970
1971
1972
1973
1974 cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
1975
1976 end_points[ep].buffer_valid = cy_true;
1977 end_points[ep].type = cy_as_hal_read;
1978 end_points[ep].pending = cy_true;
1979 end_points[ep].req_xfer_cnt = 0;
1980 end_points[ep].req_length = size;
1981
1982 if (size >= maxsize) {
1983
1984
1985
1986
1987 end_points[ep].dma_xfer_sz = maxsize;
1988 } else {
1989
1990
1991
1992
1993 end_points[ep].dma_xfer_sz = size;
1994 }
1995
1996 addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1997
1998 if (end_points[ep].sg_list_enabled) {
1999
2000
2001
2002
2003
2004
2005 end_points[ep].seg_xfer_cnt = 0;
2006 end_points[ep].sg_p = buf;
2007 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
2008
2009 #ifdef DBGPRN_DMA_SETUP_RD
2010 DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
2011 "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
2012 ep,
2013 buf,
2014 end_points[ep].data_p,
2015 size,
2016 maxsize);
2017 #endif
2018 v = (end_points[ep].dma_xfer_sz &
2019 CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
2020 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
2021 cy_as_hal_write_register(tag, addr, v);
2022 } else {
2023
2024
2025
2026 #ifdef DBGPRN_DMA_SETUP_RD
2027 DBGPRN("%s:non-sg_list EP:%d,"
2028 "RQ_sz:%d, maxsz:%d\n",
2029 __func__, ep, size, maxsize);
2030 #endif
2031
2032 end_points[ep].sg_p = NULL;
2033
2034
2035
2036
2037 end_points[ep].data_p = buf;
2038
2039
2040
2041
2042 if (is_storage_e_p(ep)) {
2043 v = (end_points[ep].dma_xfer_sz &
2044 CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
2045 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
2046 cy_as_hal_write_register(tag, addr, v);
2047 }
2048 }
2049}
2050
2051
2052
2053
2054
2055
2056void cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
2057 cy_as_hal_dma_complete_callback cb)
2058{
2059 DBGPRN("<1>\n%s: WB API has registered a dma_complete callback:%x\n",
2060 __func__, (uint32_t)cb);
2061 callback = cb;
2062}
2063
2064
2065
2066
2067
2068
2069
2070uint32_t cy_as_hal_dma_max_request_size(cy_as_hal_device_tag tag,
2071 cy_as_end_point_number_t ep)
2072{
2073
2074
2075
2076
2077
2078 if ((ep == CYASSTORAGE_READ_EP_NUM) ||
2079 (ep == CYASSTORAGE_WRITE_EP_NUM)) {
2080
2081 return CYASSTORAGE_MAX_XFER_SIZE;
2082 } else {
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092 return CY_AS_DMA_MAX_SIZE_HW_SIZE;
2093 }
2094}
2095
2096
2097
2098
2099
2100
2101cy_bool cy_as_hal_set_wakeup_pin(cy_as_hal_device_tag tag, cy_bool state)
2102{
2103
2104
2105
2106 return cy_false;
2107}
2108
2109void cy_as_hal_pll_lock_loss_handler(cy_as_hal_device_tag tag)
2110{
2111 cy_as_hal_print_message("error: astoria PLL lock is lost\n");
2112 cy_as_hal_print_message("please check the input voltage levels");
2113 cy_as_hal_print_message("and clock, and restart the system\n");
2114}
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125void *cy_as_hal_alloc(uint32_t cnt)
2126{
2127 return kmalloc(cnt, GFP_ATOMIC);
2128}
2129
2130
2131
2132
2133
2134
/*
 * Release memory obtained from cy_as_hal_alloc.  NULL is a no-op,
 * matching kfree() semantics.
 */
void cy_as_hal_free(void *mem_p)
{
	kfree(mem_p);
}
2139
2140
2141
2142
2143
2144
2145void *cy_as_hal_c_b_alloc(uint32_t cnt)
2146{
2147 return kmalloc(cnt, GFP_ATOMIC);
2148}
2149
2150
2151
2152
2153
2154
2155void cy_as_hal_mem_set(void *ptr, uint8_t value, uint32_t cnt)
2156{
2157 memset(ptr, value, cnt);
2158}
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168cy_bool cy_as_hal_create_sleep_channel(cy_as_hal_sleep_channel *channel)
2169{
2170 init_waitqueue_head(&channel->wq);
2171 return cy_true;
2172}
2173
2174
2175
2176
2177
2178
2179cy_bool cy_as_hal_destroy_sleep_channel(cy_as_hal_sleep_channel *channel)
2180{
2181 return cy_true;
2182}
2183
2184
2185
2186
2187cy_bool cy_as_hal_sleep_on(cy_as_hal_sleep_channel *channel, uint32_t ms)
2188{
2189 wait_event_interruptible_timeout(channel->wq, 0, ((ms * HZ)/1000));
2190 return cy_true;
2191}
2192
2193
2194
2195
2196cy_bool cy_as_hal_wake(cy_as_hal_sleep_channel *channel)
2197{
2198 wake_up_interruptible_all(&channel->wq);
2199 return cy_true;
2200}
2201
2202uint32_t cy_as_hal_disable_interrupts()
2203{
2204 if (0 == intr__enable)
2205 ;
2206
2207 intr__enable++;
2208 return 0;
2209}
2210
2211void cy_as_hal_enable_interrupts(uint32_t val)
2212{
2213 intr__enable--;
2214 if (0 == intr__enable)
2215 ;
2216}
2217
2218
2219
2220
2221void cy_as_hal_sleep150(void)
2222{
2223 uint32_t i, j;
2224
2225 j = 0;
2226 for (i = 0; i < 1000; i++)
2227 j += (~i);
2228}
2229
2230void cy_as_hal_sleep(uint32_t ms)
2231{
2232 cy_as_hal_sleep_channel channel;
2233
2234 cy_as_hal_create_sleep_channel(&channel);
2235 cy_as_hal_sleep_on(&channel, ms);
2236 cy_as_hal_destroy_sleep_channel(&channel);
2237}
2238
2239cy_bool cy_as_hal_is_polling()
2240{
2241 return cy_false;
2242}
2243
/*
 * Free memory obtained from cy_as_hal_c_b_alloc; callback-context-safe
 * counterpart to cy_as_hal_free.
 */
void cy_as_hal_c_b_free(void *ptr)
{
	cy_as_hal_free(ptr);
}
2248
2249
2250
2251
2252
2253void cy_as_hal_init_dev_registers(cy_as_hal_device_tag tag,
2254 cy_bool is_standby_wakeup)
2255{
2256
2257 (void) tag;
2258 (void) is_standby_wakeup;
2259}
2260
2261void cy_as_hal_read_regs_before_standby(cy_as_hal_device_tag tag)
2262{
2263
2264 (void) tag;
2265}
2266
2267cy_bool cy_as_hal_sync_device_clocks(cy_as_hal_device_tag tag)
2268{
2269
2270
2271
2272 return true;
2273}
2274
2275
2276
2277
2278int start_o_m_a_p_kernel(const char *pgm,
2279 cy_as_hal_device_tag *tag, cy_bool debug)
2280{
2281 cy_as_omap_dev_kernel *dev_p;
2282 int i;
2283 u16 data16[4];
2284 u8 pncfg_reg;
2285
2286
2287
2288
2289 (void)debug;
2290
2291 DBGPRN(KERN_INFO"starting OMAP34xx HAL...\n");
2292
2293
2294
2295
2296 for (i = 0; i < sizeof(end_points)/sizeof(end_points[0]); i++) {
2297 end_points[i].data_p = 0;
2298 end_points[i].pending = cy_false;
2299 end_points[i].size = 0;
2300 end_points[i].type = cy_as_hal_none;
2301 end_points[i].sg_list_enabled = cy_false;
2302
2303
2304
2305
2306
2307
2308
2309 }
2310
2311
2312
2313
2314 dev_p = (cy_as_omap_dev_kernel *)cy_as_hal_alloc(
2315 sizeof(cy_as_omap_dev_kernel));
2316 if (dev_p == 0) {
2317 cy_as_hal_print_message("out of memory allocating OMAP"
2318 "device structure\n");
2319 return 0;
2320 }
2321
2322 dev_p->m_sig = CY_AS_OMAP_KERNEL_HAL_SIG;
2323
2324
2325
2326
2327 dev_p->m_addr_base = (void *)cy_as_hal_processor_hw_init();
2328
2329
2330
2331
2332
2333 __gpio_set_value(AST_WAKEUP, 1);
2334
2335
2336
2337
2338 DBGPRN(KERN_INFO"-_-_pulse -> westbridge RST pin\n");
2339
2340
2341
2342
2343 __gpio_set_value(AST_RESET, 0);
2344 mdelay(1);
2345 __gpio_set_value(AST_RESET, 1);
2346 mdelay(50);
2347
2348
2349
2350
2351
2352 pncfg_reg = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
2353
2354#ifdef PNAND_16BIT_MODE
2355
2356
2357
2358
2359 ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0001);
2360
2361
2362
2363
2364
2365 cy_as_hal_gpmc_enable_16bit_bus(cy_true);
2366#else
2367
2368 ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0000);
2369#endif
2370
2371
2372
2373
2374
2375
2376
2377 data16[0] = ast_p_nand_casdo_read(CY_AS_MEM_CM_WB_CFG_ID);
2378 data16[1] = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
2379
2380 if (data16[0] != 0xA200) {
2381
2382
2383
2384 printk(KERN_ERR "ERROR: astoria device is not found, CY_AS_MEM_CM_WB_CFG_ID ");
2385 printk(KERN_ERR "read returned:%4.4X: CY_AS_MEM_PNAND_CFG:%4.4x !\n",
2386 data16[0], data16[0]);
2387 goto bus_acc_error;
2388 }
2389
2390 cy_as_hal_print_message(KERN_INFO" register access CASDO test:"
2391 "\n CY_AS_MEM_CM_WB_CFG_ID:%4.4x\n"
2392 "PNAND_CFG after RST:%4.4x\n "
2393 "CY_AS_MEM_PNAND_CFG"
2394 "after cfg_wr:%4.4x\n\n",
2395 data16[0], pncfg_reg, data16[1]);
2396
2397 dev_p->thread_flag = 1;
2398 spin_lock_init(&int_lock);
2399 dev_p->m_next_p = m_omap_list_p;
2400
2401 m_omap_list_p = dev_p;
2402 *tag = dev_p;
2403
2404 cy_as_hal_configure_interrupts((void *)dev_p);
2405
2406 cy_as_hal_print_message(KERN_INFO"OMAP3430__hal started tag:%p"
2407 ", kernel HZ:%d\n", dev_p, HZ);
2408
2409
2410
2411
2412 cy_as_hal_set_ep_dma_mode(4, true);
2413 cy_as_hal_set_ep_dma_mode(8, true);
2414
2415 return 1;
2416
2417
2418
2419
2420
2421bus_acc_error:
2422
2423
2424
2425
2426 cy_as_hal_omap_hardware_deinit(dev_p);
2427 cy_as_hal_free(dev_p);
2428 return 0;
2429}
2430
2431#else
2432
2433
2434
2435
2436
/*
 * Placeholder so this translation unit is never empty when
 * CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL is not selected.
 */
void my_o_m_a_p_kernel_hal_dummy_function(void)
{
}
2440
2441#endif
2442