1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/clk.h>
15#include <linux/slab.h>
16#include <linux/bitops.h>
17#include <linux/dma-mapping.h>
18#include <linux/dmaengine.h>
19#include <linux/module.h>
20#include <linux/mtd/rawnand.h>
21#include <linux/mtd/partitions.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/delay.h>
25#include <linux/dma/qcom_bam_dma.h>
26#include <linux/dma-direct.h>
27
28
29#define NAND_FLASH_CMD 0x00
30#define NAND_ADDR0 0x04
31#define NAND_ADDR1 0x08
32#define NAND_FLASH_CHIP_SELECT 0x0c
33#define NAND_EXEC_CMD 0x10
34#define NAND_FLASH_STATUS 0x14
35#define NAND_BUFFER_STATUS 0x18
36#define NAND_DEV0_CFG0 0x20
37#define NAND_DEV0_CFG1 0x24
38#define NAND_DEV0_ECC_CFG 0x28
39#define NAND_DEV1_ECC_CFG 0x2c
40#define NAND_DEV1_CFG0 0x30
41#define NAND_DEV1_CFG1 0x34
42#define NAND_READ_ID 0x40
43#define NAND_READ_STATUS 0x44
44#define NAND_DEV_CMD0 0xa0
45#define NAND_DEV_CMD1 0xa4
46#define NAND_DEV_CMD2 0xa8
47#define NAND_DEV_CMD_VLD 0xac
48#define SFLASHC_BURST_CFG 0xe0
49#define NAND_ERASED_CW_DETECT_CFG 0xe8
50#define NAND_ERASED_CW_DETECT_STATUS 0xec
51#define NAND_EBI2_ECC_BUF_CFG 0xf0
52#define FLASH_BUF_ACC 0x100
53
54#define NAND_CTRL 0xf00
55#define NAND_VERSION 0xf08
56#define NAND_READ_LOCATION_0 0xf20
57#define NAND_READ_LOCATION_1 0xf24
58#define NAND_READ_LOCATION_2 0xf28
59#define NAND_READ_LOCATION_3 0xf2c
60
61
62#define NAND_DEV_CMD1_RESTORE 0xdead
63#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
64
65
66#define PAGE_ACC BIT(4)
67#define LAST_PAGE BIT(5)
68
69
70#define NAND_DEV_SEL 0
71#define DM_EN BIT(2)
72
73
74#define FS_OP_ERR BIT(4)
75#define FS_READY_BSY_N BIT(5)
76#define FS_MPU_ERR BIT(8)
77#define FS_DEVICE_STS_ERR BIT(16)
78#define FS_DEVICE_WP BIT(23)
79
80
81#define BS_UNCORRECTABLE_BIT BIT(8)
82#define BS_CORRECTABLE_ERR_MSK 0x1f
83
84
85#define DISABLE_STATUS_AFTER_WRITE 4
86#define CW_PER_PAGE 6
87#define UD_SIZE_BYTES 9
88#define ECC_PARITY_SIZE_BYTES_RS 19
89#define SPARE_SIZE_BYTES 23
90#define NUM_ADDR_CYCLES 27
91#define STATUS_BFR_READ 30
92#define SET_RD_MODE_AFTER_STATUS 31
93
94
95#define DEV0_CFG1_ECC_DISABLE 0
96#define WIDE_FLASH 1
97#define NAND_RECOVERY_CYCLES 2
98#define CS_ACTIVE_BSY 5
99#define BAD_BLOCK_BYTE_NUM 6
100#define BAD_BLOCK_IN_SPARE_AREA 16
101#define WR_RD_BSY_GAP 17
102#define ENABLE_BCH_ECC 27
103
104
105#define ECC_CFG_ECC_DISABLE 0
106#define ECC_SW_RESET 1
107#define ECC_MODE 4
108#define ECC_PARITY_SIZE_BYTES_BCH 8
109#define ECC_NUM_DATA_BYTES 16
110#define ECC_FORCE_CLK_OPEN 30
111
112
113#define READ_ADDR 0
114
115
116#define READ_START_VLD BIT(0)
117#define READ_STOP_VLD BIT(1)
118#define WRITE_START_VLD BIT(2)
119#define ERASE_START_VLD BIT(3)
120#define SEQ_READ_START_VLD BIT(4)
121
122
123#define NUM_STEPS 0
124
125
126#define ERASED_CW_ECC_MASK 1
127#define AUTO_DETECT_RES 0
128#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
129#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
130#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
131#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
132#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
133
134
135#define PAGE_ALL_ERASED BIT(7)
136#define CODEWORD_ALL_ERASED BIT(6)
137#define PAGE_ERASED BIT(5)
138#define CODEWORD_ERASED BIT(4)
139#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
140#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
141
142
143#define READ_LOCATION_OFFSET 0
144#define READ_LOCATION_SIZE 16
145#define READ_LOCATION_LAST 31
146
147
148#define NAND_VERSION_MAJOR_MASK 0xf0000000
149#define NAND_VERSION_MAJOR_SHIFT 28
150#define NAND_VERSION_MINOR_MASK 0x0fff0000
151#define NAND_VERSION_MINOR_SHIFT 16
152
153
154#define PAGE_READ 0x2
155#define PAGE_READ_WITH_ECC 0x3
156#define PAGE_READ_WITH_ECC_SPARE 0x4
157#define PROGRAM_PAGE 0x6
158#define PAGE_PROGRAM_WITH_ECC 0x7
159#define PROGRAM_PAGE_SPARE 0x9
160#define BLOCK_ERASE 0xa
161#define FETCH_ID 0xb
162#define RESET_DEVICE 0xd
163
164
165#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
166 ERASE_START_VLD | SEQ_READ_START_VLD)
167
168
169#define BAM_MODE_EN BIT(0)
170
171
172
173
174
175#define NANDC_STEP_SIZE 512
176
177
178
179
180
181#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
182
183
184#define MAX_REG_RD (3 * MAX_NUM_STEPS)
185
186
187#define ECC_NONE BIT(0)
188#define ECC_RS_4BIT BIT(1)
189#define ECC_BCH_4BIT BIT(2)
190#define ECC_BCH_8BIT BIT(3)
191
192#define nandc_set_read_loc(nandc, reg, offset, size, is_last) \
193nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
194 ((offset) << READ_LOCATION_OFFSET) | \
195 ((size) << READ_LOCATION_SIZE) | \
196 ((is_last) << READ_LOCATION_LAST))
197
198
199
200
201
202#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
203
204
205#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
206
207
208#define reg_buf_dma_addr(chip, vaddr) \
209 ((chip)->reg_read_dma + \
210 ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
211
212#define QPIC_PER_CW_CMD_ELEMENTS 32
213#define QPIC_PER_CW_CMD_SGL 32
214#define QPIC_PER_CW_DATA_SGL 8
215
216
217
218
219
220
221#define NAND_BAM_NO_EOT BIT(0)
222
223#define NAND_BAM_NWD BIT(1)
224
225#define NAND_BAM_NEXT_SGL BIT(2)
226
227
228
229
230#define NAND_ERASED_CW_SET BIT(4)
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
/*
 * struct bam_transaction - bookkeeping for one BAM DMA request
 *
 * All arrays are carved out of a single allocation by
 * alloc_bam_transaction() and reused across requests.
 *
 * @bam_ce:	array of BAM command elements
 * @cmd_sgl:	scatterlist array for command descriptors
 * @data_sgl:	scatterlist array for data descriptors (tx and rx share it)
 * @bam_ce_pos:	next free slot in @bam_ce
 * @bam_ce_start: first @bam_ce slot not yet wrapped into a cmd sgl entry
 * @cmd_sgl_pos: next free slot in @cmd_sgl
 * @cmd_sgl_start: first @cmd_sgl entry not yet turned into a dma descriptor
 * @tx_sgl_pos:	next free tx slot in @data_sgl
 * @tx_sgl_start: first tx entry not yet turned into a dma descriptor
 * @rx_sgl_pos:	next free rx slot in @data_sgl
 * @rx_sgl_start: first rx entry not yet turned into a dma descriptor
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
};
262
263
264
265
266
267
268
269
270
271
272
/*
 * struct desc_info - one queued DMA descriptor
 *
 * @node:	link in qcom_nand_controller's desc_list
 * @dir:	mapping direction, needed again at unmap time
 * @adm_sgl:	single scatterlist entry (ADM DMA variant)
 * @bam_sgl:	scatterlist run of @sgl_cnt entries (BAM DMA variant)
 * @sgl_cnt:	number of entries in @bam_sgl
 * @dma_desc:	the prepared dmaengine descriptor to submit
 *
 * The union reflects that a controller is either BAM- or ADM-based,
 * never both; qcom_nandc_props->is_bam selects which member is live.
 */
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};
286
287
288
289
290
/*
 * struct nandc_regs - shadow copy of controller registers
 *
 * Register values are staged here (little-endian, ready for DMA) by
 * nandc_set_reg() and later pushed to the hardware by write_reg_dma().
 * offset_to_nandc_reg() maps a register offset to its field here.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	/* values used to clear status registers after an operation */
	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	/* saved CMD1/VLD values, restored after ONFI param page reads */
	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	/* pre-built erased-codeword-detect configs (clear/set variants) */
	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
/*
 * struct qcom_nand_controller - driver state for one NAND controller
 *
 * @controller:	base NAND controller, shared across attached chips
 * @host_list:	list of qcom_nand_host instances on this controller
 * @dev:	the platform device
 * @base:	MMIO mapping of the register block
 * @base_phys:	physical address of the register block (for BAM cmd elements)
 * @base_dma:	DMA address of the register block (for ADM transfers)
 * @core_clk/@aon_clk: controller clocks
 * @tx_chan/@rx_chan/@cmd_chan: BAM DMA channels (BAM variant)
 * @chan/@cmd_crci/@data_crci: single ADM channel and CRCIs (ADM variant)
 * @desc_list:	DMA descriptors queued for the current operation
 * @bam_txn:	per-request BAM bookkeeping (NULL on ADM)
 * @data_buffer: bounce buffer for page data and small reads (ID, param)
 * @buf_size/@buf_count/@buf_start: bounce-buffer geometry and read cursor
 * @max_cwperpage: largest codewords-per-page among attached hosts
 * @reg_read_buf: DMA-able buffer register reads land in
 * @reg_read_dma: DMA address of @reg_read_buf
 * @reg_read_pos: next free slot in @reg_read_buf
 * @regs:	shadow register values staged before DMA write-out
 * @cmd1, @vld: controller-default CMD1/VLD register values
 * @props:	per-SoC properties (BAM vs ADM, register offsets)
 */
struct qcom_nand_controller {
	struct nand_hw_control controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* BAM variant */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* ADM variant */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
/*
 * struct qcom_nand_host - one NAND chip attached to the controller
 *
 * @chip:	the generic NAND chip
 * @node:	link in the controller's host_list
 * @cs:		chip select of this chip
 * @cw_size:	total codeword size (data + ECC + spare)
 * @cw_data:	data bytes per codeword
 * @use_ecc:	whether the current operation runs with HW ECC
 * @bch_enabled: BCH (vs RS) ECC engine in use
 * @ecc_bytes_hw: ECC parity bytes per codeword
 * @spare_bytes: spare bytes per codeword
 * @bbm_size:	bad-block-marker size
 * @status:	emulated NAND status byte for erase/program results
 * @last_command: command cached by pre_command()
 * @cfg0/@cfg1:	register configs for ECC operations
 * @cfg0_raw/@cfg1_raw: register configs for raw (no-ECC) operations
 * @ecc_buf_cfg/@ecc_bch_cfg: ECC engine configuration values
 * @clrflashstatus/@clrreadstatus: values used to clear status registers
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
453
454
455
456
457
458
459
460
/*
 * struct qcom_nandc_props - per-SoC controller properties
 *
 * @ecc_modes:	supported ECC mode bitmask (ECC_RS_4BIT, ECC_BCH_* ...)
 * @is_bam:	true for BAM DMA controllers, false for ADM
 * @dev_cmd_reg_start: base offset of the DEV_CMD register group
 */
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	u32 dev_cmd_reg_start;
};
466
467
468static void free_bam_transaction(struct qcom_nand_controller *nandc)
469{
470 struct bam_transaction *bam_txn = nandc->bam_txn;
471
472 devm_kfree(nandc->dev, bam_txn);
473}
474
475
/*
 * Allocate a bam_transaction together with all of its command elements
 * and scatterlists in a single devm allocation, sized for the maximum
 * codewords-per-page this controller supports.
 *
 * Returns the initialized transaction, or NULL on allocation failure.
 */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	/* header + per-codeword cmd elements, cmd sgls and data sgls */
	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	/* carve the buffer up: header first, then the three arrays */
	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	return bam_txn;
}
509
510
511static void clear_bam_transaction(struct qcom_nand_controller *nandc)
512{
513 struct bam_transaction *bam_txn = nandc->bam_txn;
514
515 if (!nandc->props->is_bam)
516 return;
517
518 bam_txn->bam_ce_pos = 0;
519 bam_txn->bam_ce_start = 0;
520 bam_txn->cmd_sgl_pos = 0;
521 bam_txn->cmd_sgl_start = 0;
522 bam_txn->tx_sgl_pos = 0;
523 bam_txn->tx_sgl_start = 0;
524 bam_txn->rx_sgl_pos = 0;
525 bam_txn->rx_sgl_start = 0;
526
527 sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
528 QPIC_PER_CW_CMD_SGL);
529 sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
530 QPIC_PER_CW_DATA_SGL);
531}
532
/* Resolve the embedding qcom_nand_host from its nand_chip member. */
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}
537
/* Resolve the owning qcom_nand_controller from a chip's controller ptr. */
static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}
544
/* Read a 32-bit controller register at @offset via MMIO. */
static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}
549
/* Write @val to the 32-bit controller register at @offset via MMIO. */
static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}
555
556static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
557 bool is_cpu)
558{
559 if (!nandc->props->is_bam)
560 return;
561
562 if (is_cpu)
563 dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
564 MAX_REG_RD *
565 sizeof(*nandc->reg_read_buf),
566 DMA_FROM_DEVICE);
567 else
568 dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
569 MAX_REG_RD *
570 sizeof(*nandc->reg_read_buf),
571 DMA_FROM_DEVICE);
572}
573
/*
 * Map a controller register offset to its slot in the register shadow
 * (struct nandc_regs). Returns NULL for offsets without a shadow entry.
 */
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}
619
620static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
621 u32 val)
622{
623 struct nandc_regs *regs = nandc->regs;
624 __le32 *reg;
625
626 reg = offset_to_nandc_reg(regs, offset);
627
628 if (reg)
629 *reg = cpu_to_le32(val);
630}
631
632
633static void set_address(struct qcom_nand_host *host, u16 column, int page)
634{
635 struct nand_chip *chip = &host->chip;
636 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
637
638 if (chip->options & NAND_BUSWIDTH_16)
639 column >>= 1;
640
641 nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
642 nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
643}
644
645
646
647
648
649
650
651
/*
 * Stage the shadow registers for a page read or program operation that
 * spans @num_cw codewords, selecting ECC or raw configuration depending
 * on host->use_ecc.
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		/* patch the codeword count into the ECC config */
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		/* raw access: use the raw configs and disable the ECC engine */
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* for reads, fetch one codeword's worth via read location 0 */
	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
694
695
696
697
698
699
/*
 * Wrap the scatterlist entries accumulated since the last call on @chan
 * into one dmaengine descriptor and queue it on nandc->desc_list.
 *
 * The [start, pos) window of the relevant sgl array (cmd, tx or rx,
 * selected by @chan) is consumed and start is advanced to pos.
 *
 * Returns 0 on success, -ENOMEM/-EINVAL on mapping or prep failure.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* pick the sgl window and direction matching the channel */
	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	/* terminate the window so dma_map_sg() stops at the right entry */
	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
763
764
765
766
767
768
769
770
771
772
/*
 * Build BAM command elements that read (@read true) or write @size
 * consecutive 32-bit registers starting at @reg_off. Register reads land
 * in the reg_read_buf slice at @vaddr; writes take their values from
 * @vaddr (the register shadow).
 *
 * With NAND_BAM_NEXT_SGL the accumulated command elements are packed into
 * the next command sgl entry; with NAND_BAM_NWD a command-channel
 * descriptor is additionally prepared right away.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* one command element per 32-bit register */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* pack the pending command elements into one command sgl entry */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
823
824
825
826
827
828static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
829 const void *vaddr,
830 int size, unsigned int flags)
831{
832 int ret;
833 struct bam_transaction *bam_txn = nandc->bam_txn;
834
835 if (read) {
836 sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
837 vaddr, size);
838 bam_txn->rx_sgl_pos++;
839 } else {
840 sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
841 vaddr, size);
842 bam_txn->tx_sgl_pos++;
843
844
845
846
847
848 if (!(flags & NAND_BAM_NO_EOT)) {
849 ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
850 DMA_PREP_INTERRUPT);
851 if (ret)
852 return ret;
853 }
854 }
855
856 return 0;
857}
858
859static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
860 int reg_off, const void *vaddr, int size,
861 bool flow_control)
862{
863 struct desc_info *desc;
864 struct dma_async_tx_descriptor *dma_desc;
865 struct scatterlist *sgl;
866 struct dma_slave_config slave_conf;
867 enum dma_transfer_direction dir_eng;
868 int ret;
869
870 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
871 if (!desc)
872 return -ENOMEM;
873
874 sgl = &desc->adm_sgl;
875
876 sg_init_one(sgl, vaddr, size);
877
878 if (read) {
879 dir_eng = DMA_DEV_TO_MEM;
880 desc->dir = DMA_FROM_DEVICE;
881 } else {
882 dir_eng = DMA_MEM_TO_DEV;
883 desc->dir = DMA_TO_DEVICE;
884 }
885
886 ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
887 if (ret == 0) {
888 ret = -ENOMEM;
889 goto err;
890 }
891
892 memset(&slave_conf, 0x00, sizeof(slave_conf));
893
894 slave_conf.device_fc = flow_control;
895 if (read) {
896 slave_conf.src_maxburst = 16;
897 slave_conf.src_addr = nandc->base_dma + reg_off;
898 slave_conf.slave_id = nandc->data_crci;
899 } else {
900 slave_conf.dst_maxburst = 16;
901 slave_conf.dst_addr = nandc->base_dma + reg_off;
902 slave_conf.slave_id = nandc->cmd_crci;
903 }
904
905 ret = dmaengine_slave_config(nandc->chan, &slave_conf);
906 if (ret) {
907 dev_err(nandc->dev, "failed to configure dma channel\n");
908 goto err;
909 }
910
911 dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
912 if (!dma_desc) {
913 dev_err(nandc->dev, "failed to prepare desc\n");
914 ret = -EINVAL;
915 goto err;
916 }
917
918 desc->dma_desc = dma_desc;
919
920 list_add_tail(&desc->node, &nandc->desc_list);
921
922 return 0;
923err:
924 kfree(desc);
925
926 return ret;
927}
928
929
930
931
932
933
934
935
936
/*
 * Queue a DMA read of @num_regs consecutive controller registers starting
 * at @first into the next free slots of nandc->reg_read_buf.
 *
 * Returns 0 on success or a negative errno from the prep helpers.
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	/* reserve reg_read_buf slots for the incoming values */
	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	/* DEV_CMD registers live at a SoC-specific offset */
	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	/* ADM reads of these registers use CRCI flow control */
	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
959
960
961
962
963
964
965
966
967
/*
 * Queue a DMA write of @num_regs consecutive controller registers
 * starting at @first, sourcing values from the register shadow.
 *
 * Returns 0 on success or a negative errno from the prep helpers.
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	/* pick the set or clear variant of the erased-CW-detect config */
	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	/* EXEC_CMD kicks the operation; force a notify-when-done fence */
	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	/* the *_RESTORE pseudo-offsets target the real DEV_CMD registers */
	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	/* ADM writes of FLASH_CMD use CRCI flow control */
	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
1014 const u8 *vaddr, int size, unsigned int flags)
1015{
1016 if (nandc->props->is_bam)
1017 return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
1018
1019 return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
1020}
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
1032 const u8 *vaddr, int size, unsigned int flags)
1033{
1034 if (nandc->props->is_bam)
1035 return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
1036
1037 return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
1038}
1039
1040
1041
1042
1043
/*
 * Queue the register writes common to the start of a page read:
 * address, configs, ECC buffer config and erased-CW detection (cleared
 * then re-armed).
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
1053
1054
1055
1056
1057
/*
 * Queue the per-codeword read sequence: program read locations (BAM
 * only), issue the command, execute, then read back flash/buffer status
 * and the erased-CW detection status.
 */
static void config_nand_cw_read(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
		     NAND_BAM_NEXT_SGL);
}
1071
1072
1073
1074
1075
/* Queue a complete single-codeword page read (setup + one codeword). */
static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc);
}
1081
1082
1083
1084
1085
/*
 * Queue the register writes common to the start of a page program:
 * address, configs and ECC buffer config.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}
1093
1094
1095
1096
1097
/*
 * Queue the per-codeword program sequence: issue the command, execute,
 * read back the status, then clear the flash/read status registers.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
1108
1109
1110
1111
1112
1113
1114
/*
 * Queue the descriptors for an ONFI parameter page read (NAND_CMD_PARAM).
 *
 * The controller has no native "read parameter page" operation, so a
 * raw 512-byte page read is set up with ECC disabled, and the DEV_CMD1/
 * DEV_CMD_VLD registers are temporarily repointed at the PARAM opcode,
 * then restored afterwards via the *_RESTORE shadow slots.
 */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* raw read of the first 512 bytes, address 0, ECC off */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* route the read command through the PARAM opcode */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* stash the originals so they can be written back afterwards */
	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1/VLD once the param page has been read */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1171
1172
/*
 * Queue the descriptors to erase the block containing @page_addr, using
 * the raw (no-ECC) configuration, and to read back/clear the status.
 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* single codeword: zero out the CW_PER_PAGE field */
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1200
1201
/*
 * Queue the descriptors for a FETCH_ID operation (NAND_CMD_READID) at
 * @column; the ID bytes end up in NAND_READ_ID and are later copied out
 * of reg_read_buf by post_command(). @column == -1 is a no-op.
 */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* ADM needs data-mover mode enabled; BAM does not */
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1224
1225
/*
 * Queue the descriptors for a RESET_DEVICE operation (NAND_CMD_RESET)
 * and a status read-back.
 */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1241
1242
/*
 * Flush any scatterlist entries not yet wrapped in a descriptor (BAM),
 * submit every queued descriptor, kick the channels and wait for
 * completion.
 *
 * Returns 0 on success, -ETIMEDOUT if the last descriptor does not
 * complete, or an error from prepare_bam_async_desc(). The caller is
 * responsible for calling free_descs() in all cases.
 */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		/* wrap leftover rx/tx/cmd sgl entries into descriptors */
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);

		/* waiting on the cmd channel also issues its pending descs */
		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}
1288
1289static void free_descs(struct qcom_nand_controller *nandc)
1290{
1291 struct desc_info *desc, *n;
1292
1293 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
1294 list_del(&desc->node);
1295
1296 if (nandc->props->is_bam)
1297 dma_unmap_sg(nandc->dev, desc->bam_sgl,
1298 desc->sgl_cnt, desc->dir);
1299 else
1300 dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
1301 desc->dir);
1302
1303 kfree(desc);
1304 }
1305}
1306
1307
/* Reset the register read buffer and hand it back to the device. */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}
1313
/*
 * Common per-command setup: reset the bounce-buffer cursors, default to
 * no-ECC, remember the command and clear the register read buffer. For
 * commands executed synchronously here, also reset the BAM transaction.
 */
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}
1330
1331
1332
1333
1334
1335
/*
 * Inspect the FLASH_STATUS value(s) read back after an erase or program
 * and fold the result into host->status (the emulated NAND status byte):
 * MPU errors clear write-protect, operation/device errors mark failure.
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	/* program touches every codeword; erase reads one status word */
	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		/* device status is only valid in the last codeword */
		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						(flash_status &
						 FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
1359
1360static void post_command(struct qcom_nand_host *host, int command)
1361{
1362 struct nand_chip *chip = &host->chip;
1363 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1364
1365 switch (command) {
1366 case NAND_CMD_READID:
1367 nandc_read_buffer_sync(nandc, true);
1368 memcpy(nandc->data_buffer, nandc->reg_read_buf,
1369 nandc->buf_count);
1370 break;
1371 case NAND_CMD_PAGEPROG:
1372 case NAND_CMD_ERASE1:
1373 parse_erase_write_errors(host, command);
1374 break;
1375 default:
1376 break;
1377 }
1378}
1379
1380
1381
1382
1383
1384
1385
/*
 * Implements chip->cmdfunc. Only simple commands are executed directly
 * here (reset, read id, param page, erase); page reads/writes merely
 * latch the address and ECC state so that the ecc->read_page/write_page
 * hooks can do the actual transfer later.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		/* read_byte() will return these 4 ID bytes one at a time */
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		/* only whole-page writes supported: just latch the address */
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	/* only the directly-executed commands submit descriptors here */
	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
/*
 * Check whether a codeword read without BCH (RS ECC case) is actually an
 * erased (all-0xff) chunk, and fix it up in place if so.
 *
 * NOTE(review): the controller apparently reports an erased codeword by
 * placing a 0x54 marker byte at offset 3 and/or 175 of the data; the rest
 * of the chunk is then expected to be 0xff — confirm against the
 * controller documentation.
 *
 * Returns true if the chunk is erased (and has been normalized to all
 * 0xff), false otherwise (with the original bytes restored).
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/* remember the two marker positions so they can be restored */
	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if either marker byte reads 0x54 while the other is 0xff,
	 * tentatively treat both positions as erased
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * not erased after all if any non-0xff byte remains; undo the
	 * marker fixup and report a genuine (uncorrectable) chunk
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}
1508
/*
 * Per-codeword status triple captured into reg_read_buf during a page
 * read: NAND_FLASH_STATUS, NAND_BUFFER_STATUS and
 * NAND_ERASED_CW_DETECT_STATUS, in controller (little-endian) byte order.
 */
struct read_stats {
	__le32 flash;     /* NAND_FLASH_STATUS */
	__le32 buffer;    /* NAND_BUFFER_STATUS */
	__le32 erased_cw; /* NAND_ERASED_CW_DETECT_STATUS */
};
1514
1515
1516
1517
1518
/*
 * Walk the per-codeword status words of a completed page read, detect
 * erased codewords, account correctable/uncorrectable errors in
 * mtd->ecc_stats, and return the maximum number of bitflips seen in any
 * codeword (the value ecc->read_page must report).
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	buf = (struct read_stats *)nandc->reg_read_buf;
	/* sync for CPU so the DMA'd status triples are visible */
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/*
		 * the last codeword holds the remaining data (4 bytes per
		 * earlier codeword are moved here) plus all the free OOB
		 * user bytes (4 per codeword)
		 */
		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* an operation error can mean an erased codeword */
			if (host->bch_enabled) {
				/* BCH: HW erased-page detection result */
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				/* RS: detect (and fix up) in software */
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * let the generic helper decide whether this
				 * is a mostly-erased codeword with a few
				 * bitflips, or a real uncorrectable error
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			/* clean read: account the corrected bitflip count */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
1606
1607
1608
1609
1610
/*
 * Read a full page (and/or its OOB) with hardware ECC enabled. Either of
 * data_buf/oob_buf may be NULL to skip that part of the transfer.
 * Returns 0 on success or a negative error from descriptor submission.
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/*
		 * last codeword: remaining data bytes plus all the free
		 * OOB user bytes in addition to ECC parity + spare
		 */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		/* BAM needs the read locations programmed per codeword */
		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * The controller overwrites the bad block marker bytes when
		 * ECC is on, so present 0xff for the BBM bytes in the OOB
		 * buffer and read the rest of the OOB from after them.
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
1684
1685
1686
1687
1688
/*
 * Read only the last codeword of a page into nandc->data_buffer. The
 * last codeword is where the bad block marker and the free OOB bytes
 * live; host->use_ecc selects raw (cw_size) vs ECC-decoded (cw_data)
 * length.
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	/* address the start of the last codeword within the page */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
1719
1720
1721static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1722 uint8_t *buf, int oob_required, int page)
1723{
1724 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1725 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1726 u8 *data_buf, *oob_buf = NULL;
1727 int ret;
1728
1729 nand_read_page_op(chip, page, 0, NULL, 0);
1730 data_buf = buf;
1731 oob_buf = oob_required ? chip->oob_poi : NULL;
1732
1733 clear_bam_transaction(nandc);
1734 ret = read_page_ecc(host, data_buf, oob_buf);
1735 if (ret) {
1736 dev_err(nandc->dev, "failure to read page\n");
1737 return ret;
1738 }
1739
1740 return parse_read_errors(host, data_buf, oob_buf);
1741}
1742
1743
1744static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1745 struct nand_chip *chip, uint8_t *buf,
1746 int oob_required, int page)
1747{
1748 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1749 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1750 u8 *data_buf, *oob_buf;
1751 struct nand_ecc_ctrl *ecc = &chip->ecc;
1752 int i, ret;
1753 int read_loc;
1754
1755 nand_read_page_op(chip, page, 0, NULL, 0);
1756 data_buf = buf;
1757 oob_buf = chip->oob_poi;
1758
1759 host->use_ecc = false;
1760
1761 clear_bam_transaction(nandc);
1762 update_rw_regs(host, ecc->steps, true);
1763 config_nand_page_read(nandc);
1764
1765 for (i = 0; i < ecc->steps; i++) {
1766 int data_size1, data_size2, oob_size1, oob_size2;
1767 int reg_off = FLASH_BUF_ACC;
1768
1769 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1770 oob_size1 = host->bbm_size;
1771
1772 if (i == (ecc->steps - 1)) {
1773 data_size2 = ecc->size - data_size1 -
1774 ((ecc->steps - 1) << 2);
1775 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1776 host->spare_bytes;
1777 } else {
1778 data_size2 = host->cw_data - data_size1;
1779 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1780 }
1781
1782 if (nandc->props->is_bam) {
1783 read_loc = 0;
1784 nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
1785 read_loc += data_size1;
1786
1787 nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
1788 read_loc += oob_size1;
1789
1790 nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
1791 read_loc += data_size2;
1792
1793 nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
1794 }
1795
1796 config_nand_cw_read(nandc);
1797
1798 read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
1799 reg_off += data_size1;
1800 data_buf += data_size1;
1801
1802 read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
1803 reg_off += oob_size1;
1804 oob_buf += oob_size1;
1805
1806 read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
1807 reg_off += data_size2;
1808 data_buf += data_size2;
1809
1810 read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
1811 oob_buf += oob_size2;
1812 }
1813
1814 ret = submit_descs(nandc);
1815 if (ret)
1816 dev_err(nandc->dev, "failure to read raw page\n");
1817
1818 free_descs(nandc);
1819
1820 return 0;
1821}
1822
1823
/*
 * Implements ecc->read_oob(): read only the OOB bytes of a page (with
 * hardware ECC enabled) into chip->oob_poi.
 */
static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	/* NULL data buffer: transfer only the OOB portions */
	ret = read_page_ecc(host, NULL, chip->oob_poi);
	if (ret)
		dev_err(nandc->dev, "failure to read oob\n");

	return ret;
}
1845
1846
/*
 * Implements ecc->write_page(): program a whole page with hardware ECC;
 * the controller generates the parity bytes itself.
 */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* last codeword: remaining data plus free OOB user bytes */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		/* keep EOT off on the last data chunk: OOB still follows */
		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * Only the last codeword carries user OOB bytes. Skip the
		 * BBM bytes (the controller writes those itself when ECC
		 * is enabled) and program the free OOB area after the data.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
1915
1916
/*
 * Implements ecc->write_page_raw(): program a whole page, including ECC
 * parity and spare bytes supplied by the caller, with hardware ECC
 * disabled. Mirrors the four-piece codeword layout of read_page_raw.
 */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		/* data_size1 ends at the bad block marker position */
		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		/* NO_EOT on all but the final chunk of the codeword */
		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
/*
 * Implements ecc->write_oob(): rewrite the last codeword of the page
 * with fresh OOB user bytes. The last codeword is first read back
 * (copy_last_cw) so its data portion is preserved, then the caller's
 * free OOB bytes are packed after the data and the codeword is
 * reprogrammed with ECC enabled.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
2044
/*
 * Implements chip->block_bad(): check the bad block marker by reading
 * the last codeword of the block's first page raw (ECC off), since the
 * BBM sits inside the last codeword's data area on this controller.
 *
 * NOTE(review): on a read failure this returns 0 ("good block") — the
 * err label falls through to `return bad` with bad still 0; confirm
 * callers are fine with treating unreadable BBMs as good.
 */
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;
	u32 flash_status;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* read the last codeword raw so the BBM byte is not ECC-mangled */
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);

	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	/* BBM offset within the last codeword's raw data */
	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	/* 16-bit devices have a two-byte marker */
	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}
2085
/*
 * Implements chip->block_markbad(): mark a block bad by programming the
 * first codeword of its first page with all zeroes, ECC disabled, so
 * the BBM byte reads back non-0xff.
 */
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/* zero out one raw codeword's worth of data */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* write the zeroed codeword over the last codeword, ECC off */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
2127
2128
2129
2130
2131
2132
2133
2134static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
2135{
2136 struct nand_chip *chip = mtd_to_nand(mtd);
2137 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2138 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2139 u8 *buf = nandc->data_buffer;
2140 u8 ret = 0x0;
2141
2142 if (host->last_command == NAND_CMD_STATUS) {
2143 ret = host->status;
2144
2145 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2146
2147 return ret;
2148 }
2149
2150 if (nandc->buf_start < nandc->buf_count)
2151 ret = buf[nandc->buf_start++];
2152
2153 return ret;
2154}
2155
2156static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
2157{
2158 struct nand_chip *chip = mtd_to_nand(mtd);
2159 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2160 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2161
2162 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2163 nandc->buf_start += real_len;
2164}
2165
2166static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
2167 int len)
2168{
2169 struct nand_chip *chip = mtd_to_nand(mtd);
2170 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2171 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2172
2173 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2174
2175 nandc->buf_start += real_len;
2176}
2177
2178
/*
 * Implements chip->select_chip. Chip selection needs no register work
 * on this controller (per-host CFG registers are programmed on each
 * operation), so deselect (-1) and CS 0 are no-ops; anything else is an
 * invalid chip select.
 */
static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2276 struct mtd_oob_region *oobregion)
2277{
2278 struct nand_chip *chip = mtd_to_nand(mtd);
2279 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2280 struct nand_ecc_ctrl *ecc = &chip->ecc;
2281
2282 if (section > 1)
2283 return -ERANGE;
2284
2285 if (!section) {
2286 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2287 host->bbm_size;
2288 oobregion->offset = 0;
2289 } else {
2290 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2291 oobregion->offset = mtd->oobsize - oobregion->length;
2292 }
2293
2294 return 0;
2295}
2296
2297static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2298 struct mtd_oob_region *oobregion)
2299{
2300 struct nand_chip *chip = mtd_to_nand(mtd);
2301 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2302 struct nand_ecc_ctrl *ecc = &chip->ecc;
2303
2304 if (section)
2305 return -ERANGE;
2306
2307 oobregion->length = ecc->steps * 4;
2308 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2309
2310 return 0;
2311}
2312
/* OOB layout callbacks registered via mtd_set_ooblayout() */
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};
2317
/*
 * Configure a host's ECC geometry and precompute the CFG0/CFG1/ECC
 * register values (both ECC-enabled and raw variants) from the strength
 * and bus width discovered during nand_scan_ident().
 */
static int qcom_nand_host_setup(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte;
	bool wide_bus;
	int ecc_mode = 1;

	/* the controller only supports one fixed ECC step size */
	if (ecc->size != NANDC_STEP_SIZE) {
		dev_err(nandc->dev, "invalid ecc size\n");
		return -EINVAL;
	}

	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;

	if (ecc->strength >= 8) {
		/* 8-bit strength: BCH-8 */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * 4-bit strength: prefer BCH-4 if this controller
		 * generation supports it, otherwise fall back to
		 * Reed-Solomon
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH-4 */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * total OOB bytes consumed per codeword: HW parity + spare +
	 * bad block marker bytes
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	cwperpage = mtd->writesize / ecc->size;
	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * the controller transfers 516 data bytes per codeword (512 data
	 * + 4 user OOB bytes)
	 */
	host->cw_data = 516;

	/* raw codeword size: data portion plus all per-codeword OOB */
	host->cw_size = host->cw_data + ecc->bytes;

	if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
		dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
		return -EINVAL;
	}

	/* 1-indexed position of the BBM byte within the last codeword */
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	/* raw variants: whole codeword as user data, ECC disabled */
	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
2484
/*
 * Allocate the controller's buffers and DMA resources: the bounce/data
 * buffer, the shadow register block, the register-read buffer, and the
 * DMA channels (tx/rx/cmd for BAM, a single rxtx channel for ADM).
 *
 * NOTE(review): on a failure after some channels/mappings have been
 * acquired, nothing is released here — confirm the probe error path
 * calls qcom_nandc_unalloc() (which tolerates NULL channels) to avoid
 * leaking them.
 */
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * 532 bytes: one raw codeword's worth of bounce space (see
	 * cw_data/cw_size in qcom_nand_host_setup)
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kzalloc(nandc->dev,
				MAX_REG_RD * sizeof(*nandc->reg_read_buf),
				GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		/* BAM DMAs register reads straight into reg_read_buf */
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * initial BAM transaction sized for a single codeword;
		 * resized in qcom_probe_nand_devices() once the real
		 * max codewords-per-page across hosts is known
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		/* ADM uses one bidirectional channel */
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_hw_control_init(&nandc->controller);

	return 0;
}
2577
/*
 * Release the DMA resources acquired by qcom_nandc_alloc(). Safe to
 * call after a partial alloc: every release is guarded by a NULL /
 * mapping-error check.
 */
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}
2600
2601
/* one-time controller initialization: DMA mode and command validity */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand: route everything through the raw NAND interface */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA mode as appropriate */
	if (nandc->props->is_bam) {
		nand_ctrl = nandc_read(nandc, NAND_CTRL);
		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}
2625
/*
 * Initialize one NAND host (one chip select) from its device-tree node:
 * wire up the nand_chip ops, identify the chip, then compute the ECC
 * configuration via qcom_nand_host_setup().
 */
static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
			       struct qcom_nand_host *host,
			       struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->cmdfunc = qcom_nandc_command;
	chip->select_chip = qcom_nandc_select_chip;
	chip->read_byte = qcom_nandc_read_byte;
	chip->read_buf = qcom_nandc_read_buf;
	chip->write_buf = qcom_nandc_write_buf;
	chip->set_features = nand_get_set_features_notsupp;
	chip->get_features = nand_get_set_features_notsupp;

	/*
	 * custom BBM handlers: the controller keeps the marker inside the
	 * last codeword's data area, so the generic OOB-based helpers
	 * wouldn't find it
	 */
	chip->block_bad = qcom_nandc_block_bad;
	chip->block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
			 NAND_SKIP_BBTSCAN;

	/* set up initial status value */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	ret = qcom_nand_host_setup(host);

	return ret;
}
2683
/*
 * Finish scanning a host's chip and register its mtd device; on
 * registration failure the chip is cleaned up again.
 * (nandc and dn are currently unused here.)
 */
static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
				  struct qcom_nand_host *host,
				  struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(mtd_to_nand(mtd));

	return ret;
}
2702
/*
 * Probe all child nodes (one NAND host each): initialize every host,
 * resize the BAM transaction for the largest page geometry found, then
 * register the surviving hosts as mtd devices. Fails with -ENODEV only
 * if no host at all could be initialized/registered.
 */
static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host, *tmp;
	int ret;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init(nandc, host, child);
		if (ret) {
			/* skip this host but keep probing the others */
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	/*
	 * reallocate the BAM transaction now that max_cwperpage reflects
	 * every initialized host (it was sized for 1 codeword at alloc)
	 */
	if (nandc->props->is_bam) {
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
		/*
		 * NOTE(review): `child` is stale here (the iteration above
		 * finished); harmless since qcom_nand_mtd_register()
		 * ignores its dn argument, but worth confirming.
		 */
		ret = qcom_nand_mtd_register(nandc, host, child);
		if (ret) {
			list_del(&host->node);
			devm_kfree(dev, host);
		}
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	return 0;
}
2752
2753
2754static int qcom_nandc_parse_dt(struct platform_device *pdev)
2755{
2756 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2757 struct device_node *np = nandc->dev->of_node;
2758 int ret;
2759
2760 if (!nandc->props->is_bam) {
2761 ret = of_property_read_u32(np, "qcom,cmd-crci",
2762 &nandc->cmd_crci);
2763 if (ret) {
2764 dev_err(nandc->dev, "command CRCI unspecified\n");
2765 return ret;
2766 }
2767
2768 ret = of_property_read_u32(np, "qcom,data-crci",
2769 &nandc->data_crci);
2770 if (ret) {
2771 dev_err(nandc->dev, "data CRCI unspecified\n");
2772 return ret;
2773 }
2774 }
2775
2776 return 0;
2777}
2778
2779static int qcom_nandc_probe(struct platform_device *pdev)
2780{
2781 struct qcom_nand_controller *nandc;
2782 const void *dev_data;
2783 struct device *dev = &pdev->dev;
2784 struct resource *res;
2785 int ret;
2786
2787 nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
2788 if (!nandc)
2789 return -ENOMEM;
2790
2791 platform_set_drvdata(pdev, nandc);
2792 nandc->dev = dev;
2793
2794 dev_data = of_device_get_match_data(dev);
2795 if (!dev_data) {
2796 dev_err(&pdev->dev, "failed to get device data\n");
2797 return -ENODEV;
2798 }
2799
2800 nandc->props = dev_data;
2801
2802 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2803 nandc->base = devm_ioremap_resource(dev, res);
2804 if (IS_ERR(nandc->base))
2805 return PTR_ERR(nandc->base);
2806
2807 nandc->base_phys = res->start;
2808 nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
2809
2810 nandc->core_clk = devm_clk_get(dev, "core");
2811 if (IS_ERR(nandc->core_clk))
2812 return PTR_ERR(nandc->core_clk);
2813
2814 nandc->aon_clk = devm_clk_get(dev, "aon");
2815 if (IS_ERR(nandc->aon_clk))
2816 return PTR_ERR(nandc->aon_clk);
2817
2818 ret = qcom_nandc_parse_dt(pdev);
2819 if (ret)
2820 return ret;
2821
2822 ret = qcom_nandc_alloc(nandc);
2823 if (ret)
2824 goto err_core_clk;
2825
2826 ret = clk_prepare_enable(nandc->core_clk);
2827 if (ret)
2828 goto err_core_clk;
2829
2830 ret = clk_prepare_enable(nandc->aon_clk);
2831 if (ret)
2832 goto err_aon_clk;
2833
2834 ret = qcom_nandc_setup(nandc);
2835 if (ret)
2836 goto err_setup;
2837
2838 ret = qcom_probe_nand_devices(nandc);
2839 if (ret)
2840 goto err_setup;
2841
2842 return 0;
2843
2844err_setup:
2845 clk_disable_unprepare(nandc->aon_clk);
2846err_aon_clk:
2847 clk_disable_unprepare(nandc->core_clk);
2848err_core_clk:
2849 qcom_nandc_unalloc(nandc);
2850
2851 return ret;
2852}
2853
2854static int qcom_nandc_remove(struct platform_device *pdev)
2855{
2856 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2857 struct qcom_nand_host *host;
2858
2859 list_for_each_entry(host, &nandc->host_list, node)
2860 nand_release(nand_to_mtd(&host->chip));
2861
2862 qcom_nandc_unalloc(nandc);
2863
2864 clk_disable_unprepare(nandc->aon_clk);
2865 clk_disable_unprepare(nandc->core_clk);
2866
2867 return 0;
2868}
2869
/* IPQ806x: non-BAM DMA path; supports 4-bit Reed-Solomon and 8-bit BCH ECC */
static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};

/* IPQ4019: BAM DMA; supports 4-bit and 8-bit BCH ECC */
static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x0,
};

/*
 * IPQ8074: BAM DMA; BCH ECC only. The NAND_DEV_CMD* register block sits
 * at offset 0x7000 on this SoC rather than at the controller base.
 */
static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x7000,
};
2887
2888
2889
2890
2891
/*
 * Device-tree match table: each compatible string selects the per-SoC
 * property set defined above.
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
2908
/* Platform driver glue: bound to DT nodes via qcom_nandc_of_match. */
static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove  = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");
2922