/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 */
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/dma-direct.h>

/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
#define NAND_ADDR0 0x04
#define NAND_ADDR1 0x08
#define NAND_FLASH_CHIP_SELECT 0x0c
#define NAND_EXEC_CMD 0x10
#define NAND_FLASH_STATUS 0x14
#define NAND_BUFFER_STATUS 0x18
#define NAND_DEV0_CFG0 0x20
#define NAND_DEV0_CFG1 0x24
#define NAND_DEV0_ECC_CFG 0x28
#define NAND_DEV1_ECC_CFG 0x2c
#define NAND_DEV1_CFG0 0x30
#define NAND_DEV1_CFG1 0x34
#define NAND_READ_ID 0x40
#define NAND_READ_STATUS 0x44
#define NAND_DEV_CMD0 0xa0
#define NAND_DEV_CMD1 0xa4
#define NAND_DEV_CMD2 0xa8
#define NAND_DEV_CMD_VLD 0xac
#define SFLASHC_BURST_CFG 0xe0
#define NAND_ERASED_CW_DETECT_CFG 0xe8
#define NAND_ERASED_CW_DETECT_STATUS 0xec
#define NAND_EBI2_ECC_BUF_CFG 0xf0
#define FLASH_BUF_ACC 0x100

#define NAND_CTRL 0xf00
#define NAND_VERSION 0xf08
#define NAND_READ_LOCATION_0 0xf20
#define NAND_READ_LOCATION_1 0xf24
#define NAND_READ_LOCATION_2 0xf28
#define NAND_READ_LOCATION_3 0xf2c

/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE 0xdead
#define NAND_DEV_CMD_VLD_RESTORE 0xbeef

/* NAND_FLASH_CMD bits */
#define PAGE_ACC BIT(4)
#define LAST_PAGE BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL 0
#define DM_EN BIT(2)

/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR BIT(4)
#define FS_READY_BSY_N BIT(5)
#define FS_MPU_ERR BIT(8)
#define FS_DEVICE_STS_ERR BIT(16)
#define FS_DEVICE_WP BIT(23)

/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT BIT(8)
#define BS_CORRECTABLE_ERR_MSK 0x1f

/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31

/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27

/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_FORCE_CLK_OPEN 30

/* NAND_DEV_CMD1 bits */
#define READ_ADDR 0

/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD BIT(0)
#define READ_STOP_VLD BIT(1)
#define WRITE_START_VLD BIT(2)
#define ERASE_START_VLD BIT(3)
#define SEQ_READ_START_VLD BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS 0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED BIT(7)
#define CODEWORD_ALL_ERASED BIT(6)
#define PAGE_ERASED BIT(5)
#define CODEWORD_ERASED BIT(4)
#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET 0
#define READ_LOCATION_SIZE 16
#define READ_LOCATION_LAST 31

/* Version Mask */
#define NAND_VERSION_MAJOR_MASK 0xf0000000
#define NAND_VERSION_MAJOR_SHIFT 28
#define NAND_VERSION_MINOR_MASK 0x0fff0000
#define NAND_VERSION_MINOR_SHIFT 16

/* NAND OP_CMDs */
#define PAGE_READ 0x2
#define PAGE_READ_WITH_ECC 0x3
#define PAGE_READ_WITH_ECC_SPARE 0x4
#define PROGRAM_PAGE 0x6
#define PAGE_PROGRAM_WITH_ECC 0x7
#define PROGRAM_PAGE_SPARE 0x9
#define BLOCK_ERASE 0xa
#define FETCH_ID 0xb
#define RESET_DEVICE 0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
			      ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define BAM_MODE_EN BIT(0)

/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' interchangeably
 */
#define NANDC_STEP_SIZE 512

/*
 * the largest page size we support is 8K, this will have 16 step/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD (3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define ECC_NONE	BIT(0)
#define ECC_RS_4BIT	BIT(1)
#define ECC_BCH_4BIT	BIT(2)
#define ECC_BCH_8BIT	BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))

/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8

#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation of command descriptor
 * (i.e. control + data).
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL BIT(2)
/*
 * Erased codeword status is being used two times in single transfer so this
 * flag will determine the current value of erased codeword status register
 */
#define NAND_ERASED_CW_SET BIT(4)

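/*
 * This data type corresponds to the BAM transaction which will be used for
 * all NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position of the
 *		   command elements for the current sgl, used for size
 *		   calculation of the current sgl
 * @cmd_sgl_pos - current index in command sgl
 * @cmd_sgl_start - start index in command sgl
 * @tx_sgl_pos - current index in data sgl for tx
 * @tx_sgl_start - start index in data sgl for tx
 * @rx_sgl_pos - current index in data sgl for rx
 * @rx_sgl_start - start index in data sgl for rx
 * @wait_second_completion - wait for second DMA desc completion before
 *			     completing the NAND transfer
 * @txn_done - completion for NAND transfer
 * @last_data_desc - last DMA desc in data channel (tx/rx)
 * @last_cmd_desc - last DMA desc in command channel
 */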
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
	struct completion txn_done;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};

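/*
 * This data type corresponds to the nand dma descriptor
 * @node - list node to add the desc to the controller's desc_list
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for single sgl dma descriptor (EBI2 ADM)
 * @bam_sgl - sgl which will be used for dma descriptor (QPIC BAM)
 * @sgl_cnt - number of SGL in bam_sgl (QPIC BAM)
 * @dma_desc - low level DMA engine descriptor
 */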
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};

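/*
 * holds the current register values that we want to write. acts as a
 * contiguous chunk of memory which we use to write the controller registers
 * through DMA.
 */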
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};

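/*
 * NAND controller data struct
 *
 * @controller:			base controller structure
 * @host_list:			list containing all the chips attached to the
 *				controller
 * @dev:			parent device
 *
 * @base:			MMIO base
 * @base_phys:			physical base address of controller registers
 * @base_dma:			dma base address of controller registers
 *
 * @core_clk:			controller clock
 * @aon_clk:			another controller clock
 *
 * @tx_chan/rx_chan/cmd_chan:	DMA channels used by QPIC for BAM DMA
 * @chan:			DMA channel used by EBI2 for ADM DMA
 * @cmd_crci:			ADM DMA CRCI for command flow control
 * @data_crci:			ADM DMA CRCI for data flow control
 *
 * @desc_list:			DMA descriptor list (list of desc_infos)
 * @bam_txn:			contains the bam transaction buffer
 *
 * @data_buffer:		our local DMA buffer for page read/writes,
 *				used when we can't use the buffer provided
 *				by upper layers directly
 * @buf_size/count/start:	markers for chip->read_buf/write_buf functions
 * @max_cwperpage:		maximum QPIC codewords required, calculated
 *				from the pagesize of all connected NAND devices
 *
 * @reg_read_buf:		local buffer for reading back registers via DMA
 * @reg_read_dma:		contains dma address for register read buffer
 * @reg_read_pos:		marker for data read in reg_read_buf
 *
 * @regs:			a contiguous chunk of memory for DMA register
 *				writes, contains the register values to be
 *				written to the controller
 * @cmd1/vld:			some fixed controller register values
 * @props:			properties of current NAND controller,
 *				initialized via DT match data
 */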
struct qcom_nand_controller {
	struct nand_controller controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};

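/*
 * NAND chip structure
 *
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @status:			value to be returned if NAND_CMD_STATUS command
 *				is executed
 * @last_command:		keeps track of last command on this chip, used
 *				for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 */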
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};

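/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @is_bam - whether NAND controller is using BAM
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */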
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	u32 dev_cmd_reg_start;
};

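/* Frees the BAM transaction memory */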
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}

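/* Allocates and Initializes the BAM transaction */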
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}

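/* Clears the BAM transaction indexes */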
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;
	bam_txn->last_data_desc = NULL;
	bam_txn->wait_second_completion = false;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}

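/* Callback for DMA descriptor completion */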
static void qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	/*
	 * In case of data transfer with NAND, 2 callbacks will be generated.
	 * One for command channel and another one for data channel.
	 * If current transaction has data descriptors
	 * (i.e. wait_second_completion is true), then set this to false
	 * and wait for second DMA descriptor completion.
	 */
	if (bam_txn->wait_second_completion)
		bam_txn->wait_second_completion = false;
	else
		complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}

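/* helper to configure address register values */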
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}

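/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 */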
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}

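/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */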
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}

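/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from bam transaction ce array and fills the same with required
 * data. A single SGL can contain multiple command elements so
 * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
 * after the current command element.
 */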
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}

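/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */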
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}

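/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */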
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

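/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */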
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

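/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */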
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

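/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */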
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

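/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */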
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

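/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */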
static void
config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}

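/*
 * Helper to prepare DMA descriptors for configuring registers needed for
 * reading a single codeword in page.
 */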
static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
				bool use_ecc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc, use_ecc);
}

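/*
 * Helper to prepare DMA descriptors used to configure registers needed
 * before writing a NAND page.
 */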
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}

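/*
 * Helper to prepare DMA descriptors for configuring registers
 * after writing each codeword in NAND page.
 */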
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}

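/*
 * the following functions are used within chip->cmdfunc() to perform different
 * NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */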
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc, false);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

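/* sets up descriptors for NAND_CMD_ERASE1 */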
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

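/* sets up descriptors for NAND_CMD_READID */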
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

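/* sets up descriptors for NAND_CMD_RESET */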
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

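/* helpers to submit/free our list of dma descriptors */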
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}

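/* reset the register read buffer for next NAND operation */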
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}

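/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */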
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}

static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}

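/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */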
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}

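/*
 * when using RS ECC, the HW reports the same errors when reading an erased
 * CW, but it notifies that it is an erased CW by placing special characters
 * at certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */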
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the
	 * page is actually erased by looking for 0x54s at offsets 3 and 175
	 * from the beginning of each codeword
	 */
	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword marker is detected, reset the data buffer
	 * contents at those offsets to 0xff
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
	 * restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}

struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

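/* reads back FLASH_STATUS register set by the controller */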
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}

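/* performs raw read for one codeword */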
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true);
	config_nand_page_read(nandc);

	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (cw == (ecc->steps - 1)) {
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(nandc, false);

	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}

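/*
 * Bitflips can happen in erased codewords also so this function counts the
 * number of 0 in each CW for which ECC engine returns the uncorrectable
 * error. The page will be assumed as erased if this count is less than or
 * equal to the ecc->strength for each CW.
 *
 * 1. Both DATA and OOB need to be checked for number of 0. The
 *    top-level API can be called with only data buf or OOB buf so use
 *    chip->data_buf if data buf is null and chip->oob_poi if oob buf
 *    is null for copying the raw bytes.
 * 2. Perform raw read for all the CW which has uncorrectable errors.
 * 3. For a CW, check the number of 0 in cw_data and usable OOB bytes.
 *    The BBM and spare bytes bit flip won't affect the ECC so don't check
 *    the number of bitflips in this area.
 */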
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
		      u8 *oob_buf, unsigned long uncorrectable_cws,
		      int page, unsigned int max_bitflips)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *cw_data_buf, *cw_oob_buf;
	int cw, data_size, oob_size, ret = 0;

	if (!data_buf) {
		data_buf = chip->data_buf;
		chip->pagebuf = -1;
	}

	if (!oob_buf) {
		oob_buf = chip->oob_poi;
		chip->pagebuf = -1;
	}

	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
		if (cw == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) * 4);
			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw;
		}

		/* determine starting buffer address for current CW */
		cw_data_buf = data_buf + (cw * host->cw_data);
		cw_oob_buf = oob_buf + (cw * ecc->bytes);

		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
					     cw_oob_buf, page, cw);
		if (ret)
			return ret;

		/*
		 * make sure it isn't an erased page reported
		 * as not-erased by HW because of a few bitflips
		 */
		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
						  cw_oob_buf + host->bbm_size,
						  oob_size, NULL,
						  0, ecc->strength);
		if (ret < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}
	}

	return max_bitflips;
}

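/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */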
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
	struct read_stats *buf;
	bool flash_op_err = false, erased;
	int i;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		/*
		 * Check ECC failure for each codeword. ECC failure can
		 * happen in either of the following conditions
		 * 1. If number of bitflips are greater than ECC engine
		 *    MAX correctable value.
		 * 2. If this codeword contains all 0xff for which erased
		 *    codeword detection check will be done.
		 */
		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			/*
			 * For BCH ECC, ignore erased codeword errors, if
			 * ERASED_CW bits are set.
			 */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			/*
			 * For RS ECC, HW reports the erased CW by placing
			 * special characters at certain offsets in the buffer.
			 * These special characters will be valid only if
			 * complete page is read so break this page read loop
			 * and check for erased CW.
			 */
			} else if (data_buf) {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			} else {
				erased = false;
			}

			if (!erased)
				uncorrectable_cws |= BIT(i);
		/*
		 * Check if MPU or any other operational error (timeout,
		 * device failure, etc.) happened for this codeword and
		 * make flash_op_err true. If flash_op_err is set, then
		 * EIO will be returned for page read.
		 */
		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;
		/*
		 * No ECC or operational errors happened. Check the number of
		 * bits corrected and update the ecc_stats.corrected.
		 */
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		return max_bitflips;

	return check_for_erased_page(host, data_buf_start, oob_buf_start,
				     uncorrectable_cws, page,
				     max_bitflips);
}

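/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 */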
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc, true);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xff)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to read page/oob\n");
		return ret;
	}

	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}

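/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */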
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc, host->use_ecc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}

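/* implements ecc->read_page() */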
static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	clear_bam_transaction(nandc);

	return read_page_ecc(host, data_buf, oob_buf, page);
}

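/* implements ecc->read_page_raw() */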
static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int cw, ret;
	u8 *data_buf = buf, *oob_buf = chip->oob_poi;

	for (cw = 0; cw < ecc->steps; cw++) {
		ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
					     page, cw);
		if (ret)
			return ret;

		data_buf += host->cw_data;
		oob_buf += ecc->bytes;
	}

	return 0;
}

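/* implements ecc->read_oob() */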
static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			       int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	return read_page_ecc(host, NULL, chip->oob_poi, page);
}

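/* implements ecc->write_page() */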
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}

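/* implements ecc->write_page_raw() */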
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}

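/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only OOB within a codeword
 * since ECC is calculated for the combined codeword. So update the OOB from
 * chip->oob_poi, and pad the data area with 0xff before writing.
 */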
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	memset(nandc->data_buffer, 0xff, host->cw_data);
	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}

static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contains the BBM
	 */
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	if (check_flash_errors(host, 1)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}

static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}

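/*
 * the three functions below implement chip->read_byte(), chip->read_buf()
 * and chip->write_buf() respectively. these aren't used for reading/writing
 * page data, they are used for smaller data like reading id, status etc
 */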
static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *buf = nandc->data_buffer;
	u8 ret = 0x0;

	if (host->last_command == NAND_CMD_STATUS) {
		ret = host->status;

		host->status = NAND_STATUS_READY | NAND_STATUS_WP;

		return ret;
	}

	if (nandc->buf_start < nandc->buf_count)
		ret = buf[nandc->buf_start++];

	return ret;
}

static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
	nandc->buf_start += real_len;
}

static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				 int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);

	nandc->buf_start += real_len;
}

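/* we support only one external chip for now */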
static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}

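/*
 * NAND controller page layout info
 *
 * with ECC enabled, each codeword stores its data bytes followed by the
 * spare bytes, bad block markers (BBM) and the ECC bytes generated by the
 * controller. the free OOB bytes usable by upper layers (4 bytes per
 * codeword) all live in the last codeword, right after its BBM. the
 * ooblayout callbacks below expose the per-codeword ECC/spare/BBM bytes as
 * the 'ecc' region and the ecc->steps * 4 bytes in the last codeword as the
 * 'free' region.
 */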
static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
				    host->bbm_size;
		oobregion->offset = 0;
	} else {
		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
		oobregion->offset = mtd->oobsize - oobregion->length;
	}

	return 0;
}

static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = ecc->steps * 4;
	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;

	return 0;
}

static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};
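
/*
 * A worked example of the regions above (page geometry chosen purely for
 * illustration): a 2048+64 page on an 8-bit bus with 4-bit BCH gives
 * ecc->steps = 4 and ecc->bytes = 12 (7 ECC + 4 spare + 1 BBM), so:
 *
 *	ECC  section 0: offset 0,  length 3 * 12 + 1 = 37
 *	free section 0: offset 37, length 4 * 4      = 16
 *	ECC  section 1: offset 64 - (7 + 4) = 53, length 11
 *
 * which tiles the 64 OOB bytes exactly: 37 + 16 + 11 = 64.
 */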

static int
qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
	return strength == 4 ? 12 : 16;
}
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
		     NANDC_STEP_SIZE, 4, 8);
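
/*
 * The caps encode the two strengths the controller supports per 512-byte
 * step: 4-bit needs 12 OOB bytes per step, 8-bit needs 16. For the same
 * illustrative 2048+64 page, nand_ecc_choose_conf() below sees
 * 64 - 4 * 4 = 48 available bytes (4 ECC-protected OOB bytes per step are
 * set aside first); 8-bit would need 4 * 16 = 64 > 48, so 4-bit ECC
 * (4 * 12 = 48) is selected.
 */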

static int qcom_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte, ret;
	bool wide_bus;
	int ecc_mode = 1;

	/* controller only supports 512 bytes data steps */
	ecc->size = NANDC_STEP_SIZE;
	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
	cwperpage = mtd->writesize / NANDC_STEP_SIZE;

	/*
	 * each codeword has 4 OOB bytes which are protected with ECC, so
	 * only the remaining OOB bytes are available for ECC parity
	 */
	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
				   mtd->oobsize - (cwperpage * 4));
	if (ret) {
		dev_err(nandc->dev, "No valid ECC settings possible\n");
		return ret;
	}

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, it uses
		 * fewer bytes for parity; with RS, the ECC parity is always
		 * 10 bytes
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step: it gives us a clean representation of the OOB area, even if
	 * not all of those bytes hold ECC parity. It is always 16 bytes for
	 * 8 bit ECC and 12 bytes for 4 bit ECC
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * the read/write commands protect the spare data with ECC by
	 * default, so the user-data portion of a codeword is main + spare
	 * data: 512 + 4 bytes
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step: either 528 bytes for 4 bit ECC, or 532
	 * bytes for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
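
	/*
	 * A worked example of the line above (geometry for illustration
	 * only): with a 2048-byte page and 4-bit BCH on an 8-bit bus,
	 * cw_size = 516 + 12 = 528 and cwperpage = 4, so
	 * bad_block_byte = 2048 - 528 * 3 + 1 = 465, i.e. the factory bad
	 * block marker falls inside the last codeword's data area.
	 */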

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;
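
	/*
	 * 0x203 is 515, i.e. (cw_data - 1); the NUM_STEPS field presumably
	 * wants the number of bytes covered by the ECC buffer minus one.
	 * This reading is inferred from the value, not from documentation.
	 */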

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
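
/*
 * Putting qcom_nand_attach_chip() together for one hypothetical chip
 * (2048+64 page, 8-bit bus): BCH-4 is chosen, so ecc_bytes_hw = 7,
 * spare_bytes = 4, bbm_size = 1, ecc->bytes = 12, cw_data = 516,
 * cw_size = 528 and cwperpage = 4. cfg0 then encodes CW_PER_PAGE = 3 with
 * UD_SIZE_BYTES = 516, while cfg0_raw encodes UD_SIZE_BYTES = 528 with no
 * spare bytes, matching the raw (ECC-off) accessors.
 */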

static const struct nand_controller_ops qcom_nandc_ops = {
	.attach_chip = qcom_nand_attach_chip,
};

static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing a partial codeword. 532 is the maximum possible size
	 * of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
					   MAX_REG_RD, sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * initially allocate a BAM transaction sized for reading the
		 * ONFI param page (one codeword). after all devices have been
		 * detected, this transaction is freed and re-allocated
		 * according to the maximum codeword count seen
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_controller_init(&nandc->controller);
	nandc->controller.ops = &qcom_nandc_ops;

	return 0;
}

static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}

/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA */
	if (nandc->props->is_bam) {
		nand_ctrl = nandc_read(nandc, NAND_CTRL);
		nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}

static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
					    struct qcom_nand_host *host,
					    struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->cmdfunc = qcom_nandc_command;
	chip->select_chip = qcom_nandc_select_chip;
	chip->read_byte = qcom_nandc_read_byte;
	chip->read_buf = qcom_nandc_read_buf;
	chip->write_buf = qcom_nandc_write_buf;
	chip->set_features = nand_get_set_features_notsupp;
	chip->get_features = nand_get_set_features_notsupp;

	/*
	 * the bad block marker is readable only when we read the last
	 * codeword of a page with ECC disabled. currently, the nand_base and
	 * nand_bbt helpers don't let us read the BBM from a chip with ECC
	 * disabled, so we use our own block_bad() and block_markbad()
	 * helpers, which access the BBM with the ECC engine turned off
	 */
	chip->block_bad = qcom_nandc_block_bad;
	chip->block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
			 NAND_SKIP_BBTSCAN;

	/* set up initial status value */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan(mtd, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}

static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host;
	int ret;

	if (nandc->props->is_bam) {
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init_and_register(nandc, host, child);
		if (ret) {
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	return 0;
}

/* parse custom DT properties here */
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	if (!nandc->props->is_bam) {
		ret = of_property_read_u32(np, "qcom,cmd-crci",
					   &nandc->cmd_crci);
		if (ret) {
			dev_err(nandc->dev, "command CRCI unspecified\n");
			return ret;
		}

		ret = of_property_read_u32(np, "qcom,data-crci",
					   &nandc->data_crci);
		if (ret) {
			dev_err(nandc->dev, "data CRCI unspecified\n");
			return ret;
		}
	}

	return 0;
}
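
/*
 * A sketch of the DT properties consumed above on an ADM-based (non-BAM)
 * controller; the node name and CRCI numbers are placeholders chosen for
 * illustration:
 *
 *	nand-controller@1ac00000 {
 *		compatible = "qcom,ipq806x-nand";
 *		qcom,cmd-crci = <15>;
 *		qcom,data-crci = <3>;
 *
 *		nand@0 {
 *			reg = <0>;
 *		};
 *	};
 */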

static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	nandc->base_dma = dma_map_resource(dev, res->start,
					   resource_size(res),
					   DMA_BIDIRECTIONAL, 0);
	if (!nandc->base_dma)
		return -ENXIO;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_nandc_alloc;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);
err_nandc_alloc:
	dma_unmap_resource(dev, res->start, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return ret;
}

static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return 0;
}
3016
3017static const struct qcom_nandc_props ipq806x_nandc_props = {
3018 .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
3019 .is_bam = false,
3020 .dev_cmd_reg_start = 0x0,
3021};
3022
3023static const struct qcom_nandc_props ipq4019_nandc_props = {
3024 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3025 .is_bam = true,
3026 .dev_cmd_reg_start = 0x0,
3027};
3028
3029static const struct qcom_nandc_props ipq8074_nandc_props = {
3030 .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
3031 .is_bam = true,
3032 .dev_cmd_reg_start = 0x7000,
3033};
3034
3035
3036
3037
3038
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
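
/*
 * For reference, a minimal board snippet binding one of the compatibles
 * above; addresses, clock specifiers and DMA cells are placeholders for
 * illustration only:
 *
 *	nand-controller@79b0000 {
 *		compatible = "qcom,ipq8074-nand";
 *		reg = <0x79b0000 0x10000>;
 *		clocks = <&gcc GCC_QPIC_CLK>, <&gcc GCC_QPIC_AHB_CLK>;
 *		clock-names = "core", "aon";
 *		dmas = <&qpic_bam 0>, <&qpic_bam 1>, <&qpic_bam 2>;
 *		dma-names = "tx", "rx", "cmd";
 *
 *		nand@0 {
 *			reg = <0>;
 *		};
 *	};
 */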

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");