#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>

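/* NANDc reg offsets */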
#define NAND_FLASH_CMD 0x00
#define NAND_ADDR0 0x04
#define NAND_ADDR1 0x08
#define NAND_FLASH_CHIP_SELECT 0x0c
#define NAND_EXEC_CMD 0x10
#define NAND_FLASH_STATUS 0x14
#define NAND_BUFFER_STATUS 0x18
#define NAND_DEV0_CFG0 0x20
#define NAND_DEV0_CFG1 0x24
#define NAND_DEV0_ECC_CFG 0x28
#define NAND_DEV1_ECC_CFG 0x2c
#define NAND_DEV1_CFG0 0x30
#define NAND_DEV1_CFG1 0x34
#define NAND_READ_ID 0x40
#define NAND_READ_STATUS 0x44
#define NAND_DEV_CMD0 0xa0
#define NAND_DEV_CMD1 0xa4
#define NAND_DEV_CMD2 0xa8
#define NAND_DEV_CMD_VLD 0xac
#define SFLASHC_BURST_CFG 0xe0
#define NAND_ERASED_CW_DETECT_CFG 0xe8
#define NAND_ERASED_CW_DETECT_STATUS 0xec
#define NAND_EBI2_ECC_BUF_CFG 0xf0
#define FLASH_BUF_ACC 0x100

#define NAND_CTRL 0xf00
#define NAND_VERSION 0xf08
#define NAND_READ_LOCATION_0 0xf20
#define NAND_READ_LOCATION_1 0xf24
#define NAND_READ_LOCATION_2 0xf28
#define NAND_READ_LOCATION_3 0xf2c

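/* dummy register offsets, used by write_reg_dma */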
#define NAND_DEV_CMD1_RESTORE 0xdead
#define NAND_DEV_CMD_VLD_RESTORE 0xbeef

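/* NAND_FLASH_CMD bits */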
#define PAGE_ACC BIT(4)
#define LAST_PAGE BIT(5)

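/* NAND_FLASH_CHIP_SELECT bits */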
#define NAND_DEV_SEL 0
#define DM_EN BIT(2)

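/* NAND_FLASH_STATUS bits */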
#define FS_OP_ERR BIT(4)
#define FS_READY_BSY_N BIT(5)
#define FS_MPU_ERR BIT(8)
#define FS_DEVICE_STS_ERR BIT(16)
#define FS_DEVICE_WP BIT(23)

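/* NAND_BUFFER_STATUS bits */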
#define BS_UNCORRECTABLE_BIT BIT(8)
#define BS_CORRECTABLE_ERR_MSK 0x1f

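/* NAND_DEVn_CFG0 bits */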
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31

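/* NAND_DEVn_CFG1 bits */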
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27

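/* NAND_DEV0_ECC_CFG bits */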
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_FORCE_CLK_OPEN 30

#define READ_ADDR 0

#define READ_START_VLD BIT(0)
#define READ_STOP_VLD BIT(1)
#define WRITE_START_VLD BIT(2)
#define ERASE_START_VLD BIT(3)
#define SEQ_READ_START_VLD BIT(4)

#define NUM_STEPS 0

#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)

#define PAGE_ALL_ERASED BIT(7)
#define CODEWORD_ALL_ERASED BIT(6)
#define PAGE_ERASED BIT(5)
#define CODEWORD_ERASED BIT(4)
#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

#define READ_LOCATION_OFFSET 0
#define READ_LOCATION_SIZE 16
#define READ_LOCATION_LAST 31

#define NAND_VERSION_MAJOR_MASK 0xf0000000
#define NAND_VERSION_MAJOR_SHIFT 28
#define NAND_VERSION_MINOR_MASK 0x0fff0000
#define NAND_VERSION_MINOR_SHIFT 16

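/* NAND OP_CMDs */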
#define OP_PAGE_READ 0x2
#define OP_PAGE_READ_WITH_ECC 0x3
#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
#define OP_PROGRAM_PAGE 0x6
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define OP_PROGRAM_PAGE_SPARE 0x9
#define OP_BLOCK_ERASE 0xa
#define OP_FETCH_ID 0xb
#define OP_RESET_DEVICE 0xd

#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
			      ERASE_START_VLD | SEQ_READ_START_VLD)

#define BAM_MODE_EN BIT(0)

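/*
 * the controller performs page reads/writes in steps of 512 data bytes;
 * the driver refers to these chunks as 'steps' or 'codewords'
 * interchangeably. the largest supported page is 8K, i.e. MAX_NUM_STEPS
 * codewords, and we read back at most 3 status registers per codeword.
 */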
#define NANDC_STEP_SIZE 512

#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)

#define MAX_REG_RD (3 * MAX_NUM_STEPS)

#define ECC_NONE BIT(0)
#define ECC_RS_4BIT BIT(1)
#define ECC_BCH_4BIT BIT(2)
#define ECC_BCH_8BIT BIT(3)

#define nandc_set_read_loc(nandc, reg, offset, size, is_last)	\
nandc_set_reg(nandc, NAND_READ_LOCATION_##reg,			\
	      ((offset) << READ_LOCATION_OFFSET) |		\
	      ((size) << READ_LOCATION_SIZE) |			\
	      ((is_last) << READ_LOCATION_LAST))

#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8

#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)

#define NAND_BAM_NO_EOT BIT(0)
#define NAND_BAM_NWD BIT(1)
#define NAND_BAM_NEXT_SGL BIT(2)

#define NAND_ERASED_CW_SET BIT(4)

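/*
 * This data type corresponds to the BAM transaction which will be used for
 * all NAND transfers.
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position of the
 *		   current sgl, used for size calculation of the current sgl
 * @cmd_sgl_pos - current index in command sgl
 * @cmd_sgl_start - start index in command sgl
 * @tx_sgl_pos - current index in data sgl for tx
 * @tx_sgl_start - start index in data sgl for tx
 * @rx_sgl_pos - current index in data sgl for rx
 * @rx_sgl_start - start index in data sgl for rx
 * @wait_second_completion - wait for the second DMA desc completion before
 *			     completing the NAND transfer
 * @txn_done - completion for NAND transfer
 * @last_data_desc - last DMA desc in data channel (tx/rx)
 * @last_cmd_desc - last DMA desc in command channel
 */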
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
	struct completion txn_done;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};

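/*
 * This data type corresponds to the NAND dma descriptor
 * @node - list node to add the desc to desc_list
 * @dir - DMA transfer direction
 * @adm_sgl - sgl which will be used for ADM dma descriptor
 * @bam_sgl - sgl which will be used for BAM dma descriptor
 * @sgl_cnt - number of SGL entries in bam_sgl
 * @dma_desc - low level DMA engine descriptor
 */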
struct desc_info {
	struct list_head node;

	enum dma_data_direction dir;
	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};

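/*
 * holds the current register values that we want to write. acts as a
 * contiguous chunk of memory which we use to write the controller registers
 * through DMA.
 */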
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};

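/*
 * NAND controller data struct
 *
 * @controller:		base controller structure
 * @host_list:		list containing all the chips attached to the
 *			controller
 * @dev:		parent device
 * @base:		MMIO base
 * @base_phys:		physical base address of controller registers
 * @base_dma:		DMA base address of controller registers
 * @core_clk:		controller clock
 * @aon_clk:		another controller clock
 *
 * @tx_chan:		DMA channel for transmit (BAM, used only by QPIC)
 * @rx_chan:		DMA channel for receive (BAM, used only by QPIC)
 * @cmd_chan:		DMA channel for commands (BAM, used only by QPIC)
 * @chan:		DMA channel (ADM, used only by EBI2)
 * @cmd_crci:		ADM DMA CRCI for command flow control
 * @data_crci:		ADM DMA CRCI for data flow control
 *
 * @desc_list:		DMA descriptor list (list of desc_infos)
 * @bam_txn:		contains the bam transaction buffer
 *
 * @data_buffer:	our local DMA buffer for page read/writes,
 *			used when we can't use the buffer provided
 *			by upper layers directly
 * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
 *			functions
 * @max_cwperpage:	maximum QPIC codewords required, calculated
 *			from all connected NAND devices' pagesize
 *
 * @reg_read_buf:	local buffer for reading back registers via DMA
 * @reg_read_dma:	contains dma address for register read buffer
 * @reg_read_pos:	marker for data read in reg_read_buf
 *
 * @regs:		a contiguous chunk of memory for DMA register
 *			writes, contains the register values to be
 *			written to the controller
 * @cmd1/vld:		some fixed controller register values
 * @props:		properties of the current NAND controller,
 *			initialized via DT match data
 */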
struct qcom_nand_controller {
	struct nand_controller controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	phys_addr_t base_phys;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};

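/*
 * NAND chip structure
 *
 * @chip:		base NAND chip structure
 * @node:		list node to add itself to host_list in
 *			qcom_nand_controller
 *
 * @cs:			chip select value for this chip
 * @cw_size:		the number of bytes in a single step/codeword
 *			of a page, consisting of all data, ecc, spare
 *			and reserved bytes
 * @cw_data:		the number of bytes within a codeword protected
 *			by ECC
 * @use_ecc:		request the controller to use ECC for the
 *			upcoming read/write
 * @bch_enabled:	flag to tell whether BCH ECC mode is used
 * @ecc_bytes_hw:	ECC bytes used by controller hardware for this
 *			chip
 * @status:		value to be returned if NAND_CMD_STATUS command
 *			is executed
 * @last_command:	keeps track of the last command on this chip,
 *			used for reading correct status
 *
 * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
 *			ecc/non-ecc mode for the current nand flash
 *			device
 */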
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};

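/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc modes supported by this controller
 * @is_bam - whether the controller is using BAM DMA
 * @is_qpic - whether the controller is part of the QPIC IP
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 */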
struct qcom_nandc_props {
	u32 ecc_modes;
	bool is_bam;
	bool is_qpic;
	u32 dev_cmd_reg_start;
};

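/* Frees the BAM transaction memory */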
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}

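/* Allocates and Initializes the BAM transaction */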
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}

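/* Clears the BAM transaction indexes */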
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;
	bam_txn->last_data_desc = NULL;
	bam_txn->wait_second_completion = false;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}

static void qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

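	/*
	 * In case of data transfer with NAND, 2 callbacks will be generated:
	 * one for the command channel and another one for the data channel.
	 * Complete the transaction only once both callbacks have fired.
	 */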
	if (bam_txn->wait_second_completion)
		bam_txn->wait_second_completion = false;
	else
		complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	default:
		return NULL;
	}
}

static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
			  u32 val)
{
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}

static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
}

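/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 */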
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;

	if (read) {
		if (host->use_ecc)
			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}

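/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to the DMA engine.
 */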
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}

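/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type, so this function uses the command
 * elements from the bam transaction ce array and fills them with the
 * required data. A single SGL can contain multiple command elements, so
 * NAND_BAM_NEXT_SGL is used to start a separate SGL entry.
 */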
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}

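/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */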
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

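/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */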
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}

static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}

static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

static void
config_nand_cw_read(struct qcom_nand_controller *nandc, bool use_ecc)
{
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}

static void
config_nand_single_cw_page_read(struct qcom_nand_controller *nandc,
				bool use_ecc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc, use_ecc);
}

static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}

static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}

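/*
 * sets up descriptors for NAND_CMD_PARAM. NAND_CMD_PARAM is called before we
 * know much about the FLASH chip in use, so the controller is configured to
 * perform a raw, non-ECC read of 512 bytes into our buffer.
 */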
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc, false);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}

static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}

static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}

static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}

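/*
 * Implements chip->legacy.cmdfunc. It's only used for a limited set of
 * commands. The rest of the commands wouldn't be called by upper layers.
 * For example, NAND_CMD_READOOB would never be called because we have our own
 * versions of read_oob ops for nand_ecc_ctrl.
 */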
static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
			       int column, int page_addr)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}

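/*
 * when using RS ECC, the HW reports the same errors when reading an erased
 * CW, but it notifies that it is an erased CW by placing special characters
 * at certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */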
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	empty1 = data_buf[3];
	empty2 = data_buf[175];

	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}

struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}

static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true);
	config_nand_page_read(nandc);

	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (cw == (ecc->steps - 1)) {
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(nandc, false);

	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}

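/*
 * Bitflips can happen in erased codewords also, so this function counts the
 * number of 0 bits in each CW for which the ECC engine returns an
 * uncorrectable error. The page is assumed erased if this count is less than
 * or equal to ecc->strength for each CW: a raw read is performed for every
 * uncorrectable CW, and both its data and usable OOB bytes are checked
 * (BBM and spare bytes don't affect ECC, so they are skipped).
 */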
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
		      u8 *oob_buf, unsigned long uncorrectable_cws,
		      int page, unsigned int max_bitflips)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *cw_data_buf, *cw_oob_buf;
	int cw, data_size, oob_size, ret = 0;

	if (!data_buf)
		data_buf = nand_get_data_buf(chip);

	if (!oob_buf) {
		nand_get_data_buf(chip);
		oob_buf = chip->oob_poi;
	}

	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
		if (cw == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) * 4);
			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw;
		}

		cw_data_buf = data_buf + (cw * host->cw_data);
		cw_oob_buf = oob_buf + (cw * ecc->bytes);

		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
					     cw_oob_buf, page, cw);
		if (ret)
			return ret;

		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
						  cw_oob_buf + host->bbm_size,
						  oob_size, NULL,
						  0, ecc->strength);
		if (ret < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}
	}

	return max_bitflips;
}

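/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */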
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
	struct read_stats *buf;
	bool flash_op_err = false, erased;
	int i;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else if (data_buf) {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			} else {
				erased = false;
			}

			if (!erased)
				uncorrectable_cws |= BIT(i);
		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		return max_bitflips;

	return check_for_erased_page(host, data_buf_start, oob_buf_start,
				     uncorrectable_cws, page,
				     max_bitflips);
}

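/*
 * helper to perform the actual page read operation, used by ecc->read_page()
 * and ecc->read_oob()
 */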
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
	int i, ret;

	config_nand_page_read(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc, true);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to read page/oob\n");
		return ret;
	}

	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}

static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc, host->use_ecc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}

static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	clear_bam_transaction(nandc);

	return read_page_ecc(host, data_buf, oob_buf, page);
}

static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int cw, ret;
	u8 *data_buf = buf, *oob_buf = chip->oob_poi;

	for (cw = 0; cw < ecc->steps; cw++) {
		ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
					     page, cw);
		if (ret)
			return ret;

		data_buf += host->cw_data;
		oob_buf += ecc->bytes;
	}

	return 0;
}

static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true);

	return read_page_ecc(host, NULL, chip->oob_poi, page);
}

static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}

static int qcom_nandc_write_page_raw(struct nand_chip *chip,
				     const uint8_t *buf, int oob_required,
				     int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}

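/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only OOB within a codeword, so the last
 * codeword is written with its data region padded with 0xff and the free OOB
 * bytes taken from chip->oob_poi.
 */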
static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;
	clear_bam_transaction(nandc);

	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	memset(nandc->data_buffer, 0xff, host->cw_data);

	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}

static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	if (check_flash_errors(host, 1)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}

static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}

static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *buf = nandc->data_buffer;
	u8 ret = 0x0;

	if (host->last_command == NAND_CMD_STATUS) {
		ret = host->status;

		host->status = NAND_STATUS_READY | NAND_STATUS_WP;

		return ret;
	}

	if (nandc->buf_start < nandc->buf_count)
		ret = buf[nandc->buf_start++];

	return ret;
}

static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
	nandc->buf_start += real_len;
}

static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
				 int len)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);

	nandc->buf_start += real_len;
}

static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}

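/*
 * NAND controller page layout info: MTD's 'ecc' region covers all the
 * per-codeword ECC parity, spare and BBM bytes (split across two sections),
 * while the 'free' region exposes the 4 unprotected OOB bytes available per
 * codeword.
 */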
2413static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2414 struct mtd_oob_region *oobregion)
2415{
2416 struct nand_chip *chip = mtd_to_nand(mtd);
2417 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2418 struct nand_ecc_ctrl *ecc = &chip->ecc;
2419
2420 if (section > 1)
2421 return -ERANGE;
2422
2423 if (!section) {
2424 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2425 host->bbm_size;
2426 oobregion->offset = 0;
2427 } else {
2428 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2429 oobregion->offset = mtd->oobsize - oobregion->length;
2430 }
2431
2432 return 0;
2433}
2434
2435static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2436 struct mtd_oob_region *oobregion)
2437{
2438 struct nand_chip *chip = mtd_to_nand(mtd);
2439 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2440 struct nand_ecc_ctrl *ecc = &chip->ecc;
2441
2442 if (section)
2443 return -ERANGE;
2444
2445 oobregion->length = ecc->steps * 4;
2446 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2447
2448 return 0;
2449}
2450
2451static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2452 .ecc = qcom_nand_ooblayout_ecc,
2453 .free = qcom_nand_ooblayout_free,
2454};
2455
static int qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
	/* 12 parity bytes per step for 4-bit ECC, 16 for 8-bit ECC */
	return strength == 4 ? 12 : 16;
}
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
		     NANDC_STEP_SIZE, 4, 8);

static int qcom_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte, ret;
	bool wide_bus;
	int ecc_mode = 1;

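	/* the controller only supports 512-byte ECC steps */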
	ecc->size = NANDC_STEP_SIZE;
	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
	cwperpage = mtd->writesize / NANDC_STEP_SIZE;

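	/*
	 * Each codeword keeps 4 OOB bytes free, so the rest of the OOB area
	 * is what remains available to the ECC engine.
	 */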
	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
				   mtd->oobsize - (cwperpage * 4));
	if (ret) {
		dev_err(nandc->dev, "No valid ECC settings possible\n");
		return ret;
	}

	if (ecc->strength >= 8) {
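		/* 8-bit ECC is always BCH on this controller */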
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
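		/*
		 * For 4-bit ECC, use BCH when the controller variant supports
		 * it, since it needs fewer parity bytes per codeword;
		 * otherwise fall back to Reed-Solomon, which always costs 10
		 * parity bytes.
		 */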
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

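	/*
	 * ecc->bytes is the sum of all the non-data bytes in a step (parity,
	 * spare and BBM), which gives a clean per-step view of the OOB area
	 * even when the spare bytes aren't actually written: 16 bytes for
	 * 8-bit ECC, 12 bytes for 4-bit ECC.
	 */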
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

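	/*
	 * The read/write commands protect 4 spare bytes with ECC along with
	 * the 512 data bytes, so each codeword carries 516 ECC-protected
	 * bytes.
	 */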
	host->cw_data = 516;

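	/* total bytes in a step: 528 for 4-bit ECC, 532 for 8-bit ECC */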
	host->cw_size = host->cw_data + ecc->bytes;
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}

static const struct nand_controller_ops qcom_nandc_ops = {
	.attach_chip = qcom_nand_attach_chip,
};

static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}

static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

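	/*
	 * The internal buffer is used for reading ONFI parameters, for small
	 * transfers like ID and status, and for read-copy-write when writing
	 * a codeword partially. 532 bytes is the largest possible codeword
	 * size on this controller.
	 */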
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
					   MAX_REG_RD, sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
		if (IS_ERR(nandc->tx_chan)) {
			ret = PTR_ERR(nandc->tx_chan);
			nandc->tx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "tx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
		if (IS_ERR(nandc->rx_chan)) {
			ret = PTR_ERR(nandc->rx_chan);
			nandc->rx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
		if (IS_ERR(nandc->cmd_chan)) {
			ret = PTR_ERR(nandc->cmd_chan);
			nandc->cmd_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "cmd DMA channel request failed\n");
			goto unalloc;
		}

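		/*
		 * Allocate an initial BAM transaction sized for a single
		 * codeword, enough to read the ONFI parameter page. Once the
		 * attached devices are known, it is reallocated with the
		 * maximum codewords-per-page (see
		 * qcom_nand_host_init_and_register()).
		 */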
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			ret = -ENOMEM;
			goto unalloc;
		}
	} else {
		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
		if (IS_ERR(nandc->chan)) {
			ret = PTR_ERR(nandc->chan);
			nandc->chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rxtx DMA channel request failed\n");
			return ret;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_controller_init(&nandc->controller);
	nandc->controller.ops = &qcom_nandc_ops;

	return 0;
unalloc:
	qcom_nandc_unalloc(nandc);
	return ret;
}

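/* one-time setup of a few nand controller registers */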
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

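	/* disable the serial-flash (OneNAND) burst interface on non-QPIC parts */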
	if (!nandc->props->is_qpic)
		nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
		    NAND_DEV_CMD_VLD_VAL);

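	/* enable the DMA interface: BAM on newer parts, ADM otherwise */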
	if (nandc->props->is_bam) {
		nand_ctrl = nandc_read(nandc, NAND_CTRL);

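		/*
		 * NAND_CTRL is an operational register, and CPU access to
		 * operational registers is read-only in BAM mode. Only write
		 * it when BAM mode isn't already enabled; in most cases the
		 * bootloader will have enabled it.
		 */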
		if (!(nand_ctrl & BAM_MODE_EN))
			nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

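	/* save the original values of these registers */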
	nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}

static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
					    struct qcom_nand_host *host,
					    struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->legacy.cmdfunc = qcom_nandc_command;
	chip->legacy.select_chip = qcom_nandc_select_chip;
	chip->legacy.read_byte = qcom_nandc_read_byte;
	chip->legacy.read_buf = qcom_nandc_read_buf;
	chip->legacy.write_buf = qcom_nandc_write_buf;
	chip->legacy.set_features = nand_get_set_features_notsupp;
	chip->legacy.get_features = nand_get_set_features_notsupp;

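	/*
	 * The bad block marker is readable only when the last codeword of a
	 * page is read with ECC disabled. The core nand_base/nand_bbt helpers
	 * don't read bad block markers with ECC disabled (MTD_OPS_PLACE_OOB
	 * is used by default), so provide driver-specific block_bad() and
	 * block_markbad() helpers instead.
	 */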
	chip->legacy.block_bad = qcom_nandc_block_bad;
	chip->legacy.block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
			 NAND_SKIP_BBTSCAN;

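	/* set up initial status value */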
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	if (nandc->props->is_bam) {
		/*
		 * Now that the page geometry is known, resize the BAM
		 * transaction to fit the maximum codewords-per-page.
		 */
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			nand_cleanup(chip);
			return -ENOMEM;
		}
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}

static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host;
	int ret;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init_and_register(nandc, host, child);
		if (ret) {
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	return 0;
}

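/* parse custom DT properties here */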
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	if (!nandc->props->is_bam) {
		ret = of_property_read_u32(np, "qcom,cmd-crci",
					   &nandc->cmd_crci);
		if (ret) {
			dev_err(nandc->dev, "command CRCI unspecified\n");
			return ret;
		}

		ret = of_property_read_u32(np, "qcom,data-crci",
					   &nandc->data_crci);
		if (ret) {
			dev_err(nandc->dev, "data CRCI unspecified\n");
			return ret;
		}
	}

	return 0;
}

static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	nandc->base_dma = dma_map_resource(dev, res->start,
					   resource_size(res),
					   DMA_BIDIRECTIONAL, 0);
	/* dma_map_resource() flags errors via DMA_MAPPING_ERROR, not 0 */
	if (dma_mapping_error(dev, nandc->base_dma))
		return -ENXIO;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_nandc_alloc;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);
err_nandc_alloc:
	dma_unmap_resource(dev, res->start, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return ret;
}

static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct qcom_nand_host *host;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry(host, &nandc->host_list, node) {
		chip = &host->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
	}

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return 0;
}

static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x7000,
};

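/*
 * .data points at the per-variant properties declared above; this is where
 * further controller differences get captured as more variants are added.
 */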
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");