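/*
 * Driver for the NAND flash controller found on Qualcomm SoCs. The
 * controller transfers data either through ADM DMA (older EBI2 NAND
 * controllers) or BAM DMA (newer QPIC controllers).
 */
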
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>

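/* NANDc reg offsets */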
#define	NAND_FLASH_CMD			0x00
#define	NAND_ADDR0			0x04
#define	NAND_ADDR1			0x08
#define	NAND_FLASH_CHIP_SELECT		0x0c
#define	NAND_EXEC_CMD			0x10
#define	NAND_FLASH_STATUS		0x14
#define	NAND_BUFFER_STATUS		0x18
#define	NAND_DEV0_CFG0			0x20
#define	NAND_DEV0_CFG1			0x24
#define	NAND_DEV0_ECC_CFG		0x28
#define	NAND_DEV1_ECC_CFG		0x2c
#define	NAND_DEV1_CFG0			0x30
#define	NAND_DEV1_CFG1			0x34
#define	NAND_READ_ID			0x40
#define	NAND_READ_STATUS		0x44
#define	NAND_DEV_CMD0			0xa0
#define	NAND_DEV_CMD1			0xa4
#define	NAND_DEV_CMD2			0xa8
#define	NAND_DEV_CMD_VLD		0xac
#define	SFLASHC_BURST_CFG		0xe0
#define	NAND_ERASED_CW_DETECT_CFG	0xe8
#define	NAND_ERASED_CW_DETECT_STATUS	0xec
#define	NAND_EBI2_ECC_BUF_CFG		0xf0
#define	FLASH_BUF_ACC			0x100

#define	NAND_CTRL			0xf00
#define	NAND_VERSION			0xf08
#define	NAND_READ_LOCATION_0		0xf20
#define	NAND_READ_LOCATION_1		0xf24
#define	NAND_READ_LOCATION_2		0xf28
#define	NAND_READ_LOCATION_3		0xf2c

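/* dummy register offsets, used in read/write_reg_dma */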
#define	NAND_DEV_CMD1_RESTORE		0xdead
#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef

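/* NAND_FLASH_CMD bits */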
#define	PAGE_ACC			BIT(4)
#define	LAST_PAGE			BIT(5)

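/* NAND_FLASH_CHIP_SELECT bits */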
#define	NAND_DEV_SEL			0
#define	DM_EN				BIT(2)

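/* NAND_FLASH_STATUS bits */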
#define	FS_OP_ERR			BIT(4)
#define	FS_READY_BSY_N			BIT(5)
#define	FS_MPU_ERR			BIT(8)
#define	FS_DEVICE_STS_ERR		BIT(16)
#define	FS_DEVICE_WP			BIT(23)

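/* NAND_BUFFER_STATUS bits */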
#define	BS_UNCORRECTABLE_BIT		BIT(8)
#define	BS_CORRECTABLE_ERR_MSK		0x1f

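/* NAND_DEVn_CFG0 bits */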
#define	DISABLE_STATUS_AFTER_WRITE	4
#define	CW_PER_PAGE			6
#define	UD_SIZE_BYTES			9
#define	ECC_PARITY_SIZE_BYTES_RS	19
#define	SPARE_SIZE_BYTES		23
#define	NUM_ADDR_CYCLES			27
#define	STATUS_BFR_READ			30
#define	SET_RD_MODE_AFTER_STATUS	31

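/* NAND_DEVn_CFG1 bits */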
#define	DEV0_CFG1_ECC_DISABLE		0
#define	WIDE_FLASH			1
#define	NAND_RECOVERY_CYCLES		2
#define	CS_ACTIVE_BSY			5
#define	BAD_BLOCK_BYTE_NUM		6
#define	BAD_BLOCK_IN_SPARE_AREA		16
#define	WR_RD_BSY_GAP			17
#define	ENABLE_BCH_ECC			27

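/* NAND_DEV0_ECC_CFG bits */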
#define	ECC_CFG_ECC_DISABLE		0
#define	ECC_SW_RESET			1
#define	ECC_MODE			4
#define	ECC_PARITY_SIZE_BYTES_BCH	8
#define	ECC_NUM_DATA_BYTES		16
#define	ECC_FORCE_CLK_OPEN		30

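/* NAND_DEV_CMD1 bits */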
#define	READ_ADDR			0

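/* NAND_DEV_CMD_VLD bits */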
#define	READ_START_VLD			BIT(0)
#define	READ_STOP_VLD			BIT(1)
#define	WRITE_START_VLD			BIT(2)
#define	ERASE_START_VLD			BIT(3)
#define	SEQ_READ_START_VLD		BIT(4)

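/* NAND_EBI2_ECC_BUF_CFG bits */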
#define	NUM_STEPS			0

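/* NAND_ERASED_CW_DETECT_CFG bits */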
#define	ERASED_CW_ECC_MASK		1
#define	AUTO_DETECT_RES			0
#define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
#define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)

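/* NAND_ERASED_CW_DETECT_STATUS bits */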
#define	PAGE_ALL_ERASED			BIT(7)
#define	CODEWORD_ALL_ERASED		BIT(6)
#define	PAGE_ERASED			BIT(5)
#define	CODEWORD_ERASED			BIT(4)
#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)

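/* NAND_READ_LOCATION_n bits */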
#define	READ_LOCATION_OFFSET		0
#define	READ_LOCATION_SIZE		16
#define	READ_LOCATION_LAST		31

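/* NAND_VERSION masks */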
#define	NAND_VERSION_MAJOR_MASK		0xf0000000
#define	NAND_VERSION_MAJOR_SHIFT	28
#define	NAND_VERSION_MINOR_MASK		0x0fff0000
#define	NAND_VERSION_MINOR_SHIFT	16

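/* NAND OP_CMDs */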
#define	OP_PAGE_READ			0x2
#define	OP_PAGE_READ_WITH_ECC		0x3
#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
#define	OP_PROGRAM_PAGE			0x6
#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
#define	OP_PROGRAM_PAGE_SPARE		0x9
#define	OP_BLOCK_ERASE			0xa
#define	OP_FETCH_ID			0xb
#define	OP_RESET_DEVICE			0xd

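/* Default Value for NAND_DEV_CMD_VLD */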
#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
					 ERASE_START_VLD | SEQ_READ_START_VLD)

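/* NAND_CTRL bits */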
#define	BAM_MODE_EN			BIT(0)

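/*
 * ECC step size used by the controller; the driver calls a step a
 * 'codeword' interchangeably
 */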
#define	NANDC_STEP_SIZE			512

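/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */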
#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)

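/* we read at most 3 registers per codeword scan */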
#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)

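/* ECC modes supported by the controller */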
#define	ECC_NONE	BIT(0)
#define	ECC_RS_4BIT	BIT(1)
#define	ECC_BCH_4BIT	BIT(2)
#define	ECC_BCH_8BIT	BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))
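/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */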
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

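/* Returns the NAND register physical address */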
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

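/* Returns the dma address for reg read buffer */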
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

#define QPIC_PER_CW_CMD_ELEMENTS	32
#define QPIC_PER_CW_CMD_SGL		32
#define QPIC_PER_CW_DATA_SGL		8

#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
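/*
 * Flags used in DMA descriptor preparation of command descriptor
 * (i.e. for the case of BAM mode).
 */
/* Don't set the EOT in current tx BAM sgl */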
#define NAND_BAM_NO_EOT			BIT(0)
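/* Set the NWD flag in current BAM sgl */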
#define NAND_BAM_NWD			BIT(1)
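/* Finish writing in the current BAM sgl and start writing in another BAM sgl */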
#define NAND_BAM_NEXT_SGL		BIT(2)
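/*
 * Erased codeword status is being used two times in single transfer so this
 * flag will determine the current value of erased codeword status register
 */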
#define NAND_ERASED_CW_SET		BIT(4)
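/*
 * This data type corresponds to the BAM transaction which will be used for
 * any NAND request.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position of
 *		   the current sgl, used for size calculation of that sgl
 * @cmd_sgl_pos - current index in command sgl
 * @cmd_sgl_start - start index in command sgl
 * @tx_sgl_pos - current index in data sgl for tx
 * @tx_sgl_start - start index in data sgl for tx
 * @rx_sgl_pos - current index in data sgl for rx
 * @rx_sgl_start - start index in data sgl for rx
 * @wait_second_completion - wait for the second DMA desc completion before
 *			     completing the NAND transfer
 * @txn_done - completion for NAND transfer
 * @last_data_desc - last DMA desc in data channel (tx/rx)
 * @last_cmd_desc - last DMA desc in command channel
 */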
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
	struct completion txn_done;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};
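/*
 * This data type corresponds to the nand dma descriptor
 * @node - list node (inserted into the controller's desc_list)
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for a single sgl dma descriptor (ADM)
 * @bam_sgl - sgl array which will be used for the dma descriptor (BAM)
 * @sgl_cnt - number of SGL entries in bam_sgl
 * @dma_desc - low level dma engine descriptor
 */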
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};
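/*
 * holds the current register values that we want to write. acts as a
 * contiguous chunk of memory which we use to write the controller registers
 * through DMA.
 */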
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};
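/*
 * NAND controller data struct
 *
 * @controller:		base controller structure
 * @host_list:		list containing all the chips attached to the
 *			controller
 * @dev:		parent device
 * @base:		MMIO base
 * @base_phys:		physical base address of controller registers
 * @base_dma:		dma base address of controller registers
 * @core_clk:		controller clock
 * @aon_clk:		another controller clock
 * @tx_chan:		dma channel for tx data transfers (BAM)
 * @rx_chan:		dma channel for rx data transfers (BAM)
 * @cmd_chan:		dma channel for command transfers (BAM)
 * @chan:		dma channel (ADM)
 * @cmd_crci:		ADM DMA CRCI for command flow control
 * @data_crci:		ADM DMA CRCI for data flow control
 * @desc_list:		DMA descriptor list (list of desc_infos)
 * @bam_txn:		contains the bam transaction buffer
 * @data_buffer:	our local DMA buffer for page read/writes,
 *			used when we can't use the buffer provided
 *			by upper layers directly
 * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
 *			functions
 * @max_cwperpage:	maximum QPIC codewords required, calculated
 *			from all connected NAND devices' pagesize
 * @reg_read_buf:	local buffer for reading back registers via DMA
 * @reg_read_dma:	contains dma address for register read buffer
 * @reg_read_pos:	marker for data read in reg_read_buf
 * @regs:		a contiguous chunk of memory for DMA register
 *			writes
 * @cmd1/@vld:		some fixed controller register values
 * @props:		properties of the current NAND controller,
 *			initialized via DT match data
 */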
struct qcom_nand_controller {
	struct nand_controller controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
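		/* will be used only by QPIC for BAM DMA */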
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

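		/* will be used only by EBI2 for ADM DMA */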
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};
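/*
 * NAND chip structure
 *
 * @chip:		base NAND chip structure
 * @node:		list node to add itself to host_list in
 *			qcom_nand_controller
 * @cs:			chip select value for this chip
 * @cw_size:		the number of bytes in a single step/codeword
 *			of a page, consisting of all data, ecc, spare
 *			and reserved bytes
 * @cw_data:		the number of bytes within a codeword protected
 *			by ECC
 * @use_ecc:		request the controller to use ECC for the
 *			upcoming read/write
 * @bch_enabled:	flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:	ECC bytes used by controller hardware for this
 *			chip
 * @status:		value to be returned if NAND_CMD_STATUS command
 *			is executed
 * @last_command:	keeps track of last command on this chip. used
 *			for reading correct status
 * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
 *			ecc/non-ecc mode for the current nand flash
 *			device
 */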
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
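/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether the NAND controller is using BAM
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */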
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	u32 dev_cmd_reg_start;
};

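/* Frees the BAM transaction memory */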
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}

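/* Allocates and Initializes the BAM transaction */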
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}

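/* Clears the BAM transaction indexes */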
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;
	bam_txn->last_data_desc = NULL;
	bam_txn->wait_second_completion = false;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}

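/* Callback for DMA descriptor completion */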
static void qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

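	/*
	 * In case of data transfer with NAND, 2 callbacks will be generated.
	 * One for the command channel and another one for the data channel.
	 * If the current transaction has data descriptors
	 * (i.e. wait_second_completion is true), then set this to false
	 * and wait for the second DMA descriptor completion.
	 */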
	if (bam_txn->wait_second_completion)
		bam_txn->wait_second_completion = false;
	else
		complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}
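/* helper to configure address register values */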
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}

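/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 */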
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
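/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */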
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
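/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from the bam transaction ce array and fills the same with the
 * required data. A single SGL can contain multiple command elements, so
 * NAND_BAM_NEXT_SGL will be used for starting a separate SGL. Similarly,
 * NAND_BAM_NWD will be used for specifying the NWD flag for the last CE in
 * this SGL.
 */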
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
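/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */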
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

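		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */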
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
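/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */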
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
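/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */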
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
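/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */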
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
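/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */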
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
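/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */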
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
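/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in a NAND page.
 */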
static void
config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}
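/*
 * Helper to prepare dma descriptors to configure registers needed for reading
 * a single codeword in a page
 */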
static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
				bool use_ecc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc, use_ecc);
}
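/*
 * Helper to prepare DMA descriptors used to configure registers needed
 * before writing a NAND page.
 */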
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}
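/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in a NAND page.
 */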
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
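/*
 * the following functions are used within chip->legacy.cmdfunc() to
 * perform different NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */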
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

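	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */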
	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc, false);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
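/* sets up descriptors for NAND_CMD_ERASE1 */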
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
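/* sets up descriptors for NAND_CMD_READID */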
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
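/* sets up descriptors for NAND_CMD_RESET */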
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
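/* helpers to submit/free our list of dma descriptors */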
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}
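/* reset the register read buffer for the next NAND operation */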
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}
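/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */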
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}

static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}
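/*
 * Implements chip->legacy.cmdfunc. It's only used for a limited set of
 * commands. The rest of the commands wouldn't be called by upper layers.
 * For example, NAND_CMD_READOOB would never be called because we have our own
 * versions of read_oob ops for nand_ecc_ctrl.
 */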
static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
			       int column, int page_addr)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
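/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same erased CW errors for an erased
 * page, but it notifies that it detected an erased CW by placing special
 * characters at certain offsets in the buffer.
 *
 * verify if the page is really erased or not, and fix up the page for RS ECC
 * by replacing the special characters with 0xff.
 */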
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page in RS ECC mode has the special characters placed
	 * at offsets 3 and 175 within the codeword; sample both bytes
	 * before deciding
	 */
	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if one of those bytes is 0x54 and the other is 0xff, this looks
	 * like an erased codeword; replace the special characters with 0xff
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't,
	 * then restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}

struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

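/* reads back FLASH_STATUS register set by the controller */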
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}
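/* performs raw read for one codeword */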
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true);
	config_nand_page_read(nandc);

	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (cw == (ecc->steps - 1)) {
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(nandc, false);

	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}
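/*
 * Bitflips can happen in erased codewords also so this function counts the
 * number of 0 in each CW for which the ECC engine returns the uncorrectable
 * error. The page will be assumed as erased if this count is less than or
 * equal to the ecc->strength for each CW.
 *
 * 1. Both DATA and OOB need to be checked for number of 0. The
 *    top-level API can be called with only data buf or OOB buf, so use
 *    chip's internal data buf if data buf is null and chip->oob_poi if
 *    oob buf is null for copying the raw bytes.
 * 2. Perform raw read for all the CWs which have uncorrectable errors.
 * 3. For each CW, check the number of 0 in cw_data and usable OOB bytes.
 *    The BBM and spare bytes bit flips won't affect the ECC so don't check
 *    the number of bitflips in this area.
 */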
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
		      u8 *oob_buf, unsigned long uncorrectable_cws,
		      int page, unsigned int max_bitflips)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *cw_data_buf, *cw_oob_buf;
	int cw, data_size, oob_size, ret = 0;

	if (!data_buf)
		data_buf = nand_get_data_buf(chip);

	if (!oob_buf) {
		nand_get_data_buf(chip);
		oob_buf = chip->oob_poi;
	}

	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
		if (cw == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) * 4);
			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw;
		}

		cw_data_buf = data_buf + (cw * host->cw_data);
		cw_oob_buf = oob_buf + (cw * ecc->bytes);

		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
					     cw_oob_buf, page, cw);
		if (ret)
			return ret;

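		/*
		 * make sure it isn't an erased page reported
		 * as not-erased by HW because of a few bitflips
		 */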
		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
						  cw_oob_buf + host->bbm_size,
						  oob_size, NULL,
						  0, ecc->strength);
		if (ret < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}
	}

	return max_bitflips;
}
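/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */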
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
	struct read_stats *buf;
	bool flash_op_err = false, erased;
	int i;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

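		/*
		 * Check ECC failure for each codeword. ECC failure can
		 * happen in either of the following conditions
		 * 1. If the number of bitflips is greater than the ECC
		 *    engine's correction capability.
		 * 2. If this codeword contains all 0xff, for which the
		 *    erased codeword detection check will be done. For
		 *    BCH ECC the HW reports this via ERASED_CW bits; for
		 *    RS ECC it places special characters in the buffer,
		 *    which are only valid when the complete page was read
		 *    (i.e. data_buf is not NULL).
		 */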
		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW;
			} else if (data_buf) {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			} else {
				erased = false;
			}

			if (!erased)
				uncorrectable_cws |= BIT(i);
		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			/*
			 * An MPU or other operational error (timeout, device
			 * failure, etc.) happened for this codeword; EIO will
			 * be returned for the page read.
			 */
			flash_op_err = true;
		} else {
			/*
			 * No ECC or operational errors happened. Check the
			 * number of bits corrected and update
			 * ecc_stats.corrected.
			 */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		return max_bitflips;

	return check_for_erased_page(host, data_buf_start, oob_buf_start,
				     uncorrectable_cws, page,
				     max_bitflips);
}
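/*
 * helper to perform the actual page read operation, used by ecc->read_page()
 * and ecc->read_oob()
 */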
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
	int i, ret;

	config_nand_page_read(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc, true);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

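		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xff)
		 */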
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to read page/oob\n");
		return ret;
	}

	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
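/*
 * a helper that copies the last step/codeword of a page into the
 * controller's local data buffer
 */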
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc, host->use_ecc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
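/* implements ecc->read_page() */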
static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	clear_bam_transaction(nandc);

	return read_page_ecc(host, data_buf, oob_buf, page);
}
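/* implements ecc->read_page_raw() */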
static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int cw, ret;
	u8 *data_buf = buf, *oob_buf = chip->oob_poi;

	for (cw = 0; cw < ecc->steps; cw++) {
		ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
					     page, cw);
		if (ret)
			return ret;

		data_buf += host->cw_data;
		oob_buf += ecc->bytes;
	}

	return 0;
}
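/* implements ecc->read_oob() */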
static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	return read_page_ecc(host, NULL, chip->oob_poi, page);
}
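/* implements ecc->write_page() */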
static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

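		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */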
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
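/* implements ecc->write_page_raw() */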
static int qcom_nandc_write_page_raw(struct nand_chip *chip,
				     const uint8_t *buf, int oob_required,
				     int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
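/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only OOB within a codeword
 * since ECC is calculated for the combined codeword. So update the OOB from
 * chip->oob_poi, and pad the data area with 0xff before writing.
 */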
static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;
	clear_bam_transaction(nandc);

	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	memset(nandc->data_buffer, 0xff, host->cw_data);

	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}

static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

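	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading the
	 * ecc portion of oob. we just want the first few bytes from this
	 * codeword that contain the BBM
	 */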
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	if (check_flash_errors(host, 1)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}

static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

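	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */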
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
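/*
 * the three functions below implement chip->legacy.read_byte(),
 * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
 * aren't used for reading/writing page data, they are used for smaller data
 * like reading id, status etc
 */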
static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *buf = nandc->data_buffer;
	u8 ret = 0x0;

	if (host->last_command == NAND_CMD_STATUS) {
		ret = host->status;

		host->status = NAND_STATUS_READY | NAND_STATUS_WP;

		return ret;
	}

	if (nandc->buf_start < nandc->buf_count)
		ret = buf[nandc->buf_start++];

	return ret;
}

static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
	nandc->buf_start += real_len;
}

static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
				 int len)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);

	nandc->buf_start += real_len;
}
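/* we support only one external chip for now */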
static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}
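/*
 * NAND controller page layout info
 *
 * The controller operates on 528-byte (4 bit ECC) or 532-byte (8 bit ECC)
 * codewords. The first n - 1 codewords of a page each contain 516 bytes of
 * user data followed by ECC and reserved bytes; the last codeword contains
 * the remaining user data plus the spare (oobavail) bytes, again summing to
 * 516 bytes, followed by its ECC. The bad block marker sits at the start of
 * the non-data area of the last codeword. The ooblayout callbacks below
 * expose the per-codeword ECC/reserved bytes (plus the BBM) as the 'ecc'
 * region and the spare bytes of the last codeword as the 'free' region.
 */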
static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
				    host->bbm_size;
		oobregion->offset = 0;
	} else {
		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
		oobregion->offset = mtd->oobsize - oobregion->length;
	}

	return 0;
}

static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = ecc->steps * 4;
	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;

	return 0;
}

static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};

static int
qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
	return strength == 4 ? 12 : 16;
}
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
		     NANDC_STEP_SIZE, 4, 8);
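
/*
 * Example (illustrative): with 512-byte steps, 4-bit ECC costs 12 OOB bytes
 * per step and 8-bit ECC costs 16. A 2048+64 page has 64 - 4 * 4 = 48 OOB
 * bytes available for ECC, so only 4-bit ECC (4 * 12 = 48) fits; 8-bit ECC
 * (4 * 16 = 64) needs a larger OOB area, e.g. 2048+128.
 */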

static int qcom_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte, ret;
	bool wide_bus;
	int ecc_mode = 1;

	/* the controller only supports 512-byte data steps */
	ecc->size = NANDC_STEP_SIZE;
	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
	cwperpage = mtd->writesize / NANDC_STEP_SIZE;

	/*
	 * each codeword has 4 OOB bytes that are protected with ECC, so only
	 * the remaining OOB bytes are available for ECC parity
	 */
	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
				   mtd->oobsize - (cwperpage * 4));
	if (ret) {
		dev_err(nandc->dev, "No valid ECC settings possible\n");
		return ret;
	}

	if (ecc->strength >= 8) {
		/* 8 bit ECC is always BCH */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, it uses fewer
		 * parity bytes; with RS, the parity size is always 10 bytes
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}
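
	/*
	 * Summary of the per-codeword geometries selected above (derived
	 * from the branches, for reference):
	 *
	 *	ECC scheme   bus   parity  spare  BBM   total (= ecc->bytes)
	 *	BCH 8-bit    x16     14      0     2     16
	 *	BCH 8-bit    x8      13      2     1     16
	 *	BCH 4-bit    x16      8      2     2     12
	 *	BCH 4-bit    x8       7      4     1     12
	 *	RS  4-bit    x16     10      0     2     12
	 *	RS  4-bit    x8      10      1     1     12
	 */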

	/*
	 * ecc->bytes is the sum of all the non-data content in a step: it
	 * gives a clean representation of the OOB cost per codeword even if
	 * not all of those bytes hold ECC parity. It is always 16 bytes for
	 * 8 bit ECC and 12 bytes for 4 bit ECC.
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * the read/write commands protect the spare data with ECC too, so
	 * the codeword payload is main + spare data: 512 + 4 bytes
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step: 528 bytes for 4 bit ECC, 532 bytes for
	 * 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
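
	/*
	 * Example (illustrative): for a 2048-byte page with 4-bit ECC,
	 * cw_size = 516 + 12 = 528 and cwperpage = 4. The last codeword
	 * starts at page offset 528 * 3 = 1584, so the factory bad block
	 * marker (the first OOB byte, page offset 2048) lands at offset
	 * 2048 - 1584 = 464 within that codeword; the + 1 above accounts
	 * for the register field's 1-based numbering, giving 465.
	 */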

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}

static const struct nand_controller_ops qcom_nandc_ops = {
	.attach_chip = qcom_nand_attach_chip,
};

static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * the internal buffer is used for reading the ONFI parameter page,
	 * reading small data like ID and status, and for read-copy-write
	 * when writing a codeword partially. 532 is the maximum possible
	 * codeword size for this controller.
	 */
	nandc->buf_size = 532;
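
	/*
	 * Illustrative arithmetic: the largest codeword occurs with 8-bit
	 * ECC, where cw_data + ecc->bytes = 516 + 16 = 532 bytes; hence the
	 * buffer size above rather than a page-sized allocation.
	 */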

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
					   MAX_REG_RD, sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * initially allocate a BAM transaction just big enough to
		 * read the ONFI parameter page; once the devices have been
		 * detected it is freed and reallocated with the real maximum
		 * codeword count (see qcom_nand_host_init_and_register())
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_controller_init(&nandc->controller);
	nandc->controller.ops = &qcom_nandc_ops;

	return 0;
}

static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}

/* one-time setup of a few NAND controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA */
	if (nandc->props->is_bam) {
		nand_ctrl = nandc_read(nandc, NAND_CTRL);
		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}

static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
					    struct qcom_nand_host *host,
					    struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->legacy.cmdfunc = qcom_nandc_command;
	chip->legacy.select_chip = qcom_nandc_select_chip;
	chip->legacy.read_byte = qcom_nandc_read_byte;
	chip->legacy.read_buf = qcom_nandc_read_buf;
	chip->legacy.write_buf = qcom_nandc_write_buf;
	chip->legacy.set_features = nand_get_set_features_notsupp;
	chip->legacy.get_features = nand_get_set_features_notsupp;

	/*
	 * the bad block marker is readable only when we read the last
	 * codeword of a page with ECC disabled. The generic nand_base and
	 * nand_bbt helpers don't allow reading the BBM with ECC disabled
	 * (MTD_OPS_PLACE_OOB is set by default), so use our own block_bad
	 * and block_markbad helpers instead
	 */
	chip->legacy.block_bad = qcom_nandc_block_bad;
	chip->legacy.block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
			 NAND_SKIP_BBTSCAN;

	/* set up initial status value */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	if (nandc->props->is_bam) {
		/*
		 * nand_scan() has run attach_chip(), so max_cwperpage is now
		 * known; resize the BAM transaction accordingly
		 */
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}

static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host;
	int ret;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init_and_register(nandc, host, child);
		if (ret) {
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	return 0;
}

/* parse custom DT properties here */
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	if (!nandc->props->is_bam) {
		ret = of_property_read_u32(np, "qcom,cmd-crci",
					   &nandc->cmd_crci);
		if (ret) {
			dev_err(nandc->dev, "command CRCI unspecified\n");
			return ret;
		}

		ret = of_property_read_u32(np, "qcom,data-crci",
					   &nandc->data_crci);
		if (ret) {
			dev_err(nandc->dev, "data CRCI unspecified\n");
			return ret;
		}
	}

	return 0;
}
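
/*
 * Illustrative DT fragment showing the properties this driver consumes
 * (addresses, phandles and specifiers are placeholders, not from a real
 * board). BAM-based variants use the "tx"/"rx"/"cmd" DMA channels requested
 * in qcom_nandc_alloc(); ADM-based ipq806x instead uses a single "rxtx"
 * channel plus the CRCI properties parsed above:
 *
 *	nand-controller@79b0000 {
 *		compatible = "qcom,ipq4019-nand";
 *		reg = <0x79b0000 0x1000>;
 *		clocks = <&gcc 1>, <&gcc 2>;
 *		clock-names = "core", "aon";
 *		dmas = <&qpicbam 0>, <&qpicbam 1>, <&qpicbam 2>;
 *		dma-names = "tx", "rx", "cmd";
 *
 *		nand@0 {
 *			reg = <0>;
 *		};
 *	};
 */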

static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	nandc->base_dma = dma_map_resource(dev, res->start,
					   resource_size(res),
					   DMA_BIDIRECTIONAL, 0);
	if (!nandc->base_dma)
		return -ENXIO;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_nandc_alloc;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);
err_nandc_alloc:
	dma_unmap_resource(dev, res->start, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return ret;
}

static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(&host->chip);

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return 0;
}

static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x7000,
};

/*
 * .data holds a pointer to the per-variant properties; more differences
 * can be added there as further controller variants are supported
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");