1
2
3
4
5
6#include <linux/clk.h>
7#include <linux/slab.h>
8#include <linux/bitops.h>
9#include <linux/dma-mapping.h>
10#include <linux/dmaengine.h>
11#include <linux/module.h>
12#include <linux/mtd/rawnand.h>
13#include <linux/mtd/partitions.h>
14#include <linux/of.h>
15#include <linux/of_device.h>
16#include <linux/delay.h>
17#include <linux/dma/qcom_bam_dma.h>
18
19
20#define NAND_FLASH_CMD 0x00
21#define NAND_ADDR0 0x04
22#define NAND_ADDR1 0x08
23#define NAND_FLASH_CHIP_SELECT 0x0c
24#define NAND_EXEC_CMD 0x10
25#define NAND_FLASH_STATUS 0x14
26#define NAND_BUFFER_STATUS 0x18
27#define NAND_DEV0_CFG0 0x20
28#define NAND_DEV0_CFG1 0x24
29#define NAND_DEV0_ECC_CFG 0x28
30#define NAND_AUTO_STATUS_EN 0x2c
31#define NAND_DEV1_CFG0 0x30
32#define NAND_DEV1_CFG1 0x34
33#define NAND_READ_ID 0x40
34#define NAND_READ_STATUS 0x44
35#define NAND_DEV_CMD0 0xa0
36#define NAND_DEV_CMD1 0xa4
37#define NAND_DEV_CMD2 0xa8
38#define NAND_DEV_CMD_VLD 0xac
39#define SFLASHC_BURST_CFG 0xe0
40#define NAND_ERASED_CW_DETECT_CFG 0xe8
41#define NAND_ERASED_CW_DETECT_STATUS 0xec
42#define NAND_EBI2_ECC_BUF_CFG 0xf0
43#define FLASH_BUF_ACC 0x100
44
45#define NAND_CTRL 0xf00
46#define NAND_VERSION 0xf08
47#define NAND_READ_LOCATION_0 0xf20
48#define NAND_READ_LOCATION_1 0xf24
49#define NAND_READ_LOCATION_2 0xf28
50#define NAND_READ_LOCATION_3 0xf2c
51#define NAND_READ_LOCATION_LAST_CW_0 0xf40
52#define NAND_READ_LOCATION_LAST_CW_1 0xf44
53#define NAND_READ_LOCATION_LAST_CW_2 0xf48
54#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
55
56
57#define NAND_DEV_CMD1_RESTORE 0xdead
58#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
59
60
61#define PAGE_ACC BIT(4)
62#define LAST_PAGE BIT(5)
63
64
65#define NAND_DEV_SEL 0
66#define DM_EN BIT(2)
67
68
69#define FS_OP_ERR BIT(4)
70#define FS_READY_BSY_N BIT(5)
71#define FS_MPU_ERR BIT(8)
72#define FS_DEVICE_STS_ERR BIT(16)
73#define FS_DEVICE_WP BIT(23)
74
75
76#define BS_UNCORRECTABLE_BIT BIT(8)
77#define BS_CORRECTABLE_ERR_MSK 0x1f
78
79
80#define DISABLE_STATUS_AFTER_WRITE 4
81#define CW_PER_PAGE 6
82#define UD_SIZE_BYTES 9
83#define ECC_PARITY_SIZE_BYTES_RS 19
84#define SPARE_SIZE_BYTES 23
85#define NUM_ADDR_CYCLES 27
86#define STATUS_BFR_READ 30
87#define SET_RD_MODE_AFTER_STATUS 31
88
89
90#define DEV0_CFG1_ECC_DISABLE 0
91#define WIDE_FLASH 1
92#define NAND_RECOVERY_CYCLES 2
93#define CS_ACTIVE_BSY 5
94#define BAD_BLOCK_BYTE_NUM 6
95#define BAD_BLOCK_IN_SPARE_AREA 16
96#define WR_RD_BSY_GAP 17
97#define ENABLE_BCH_ECC 27
98
99
100#define ECC_CFG_ECC_DISABLE 0
101#define ECC_SW_RESET 1
102#define ECC_MODE 4
103#define ECC_PARITY_SIZE_BYTES_BCH 8
104#define ECC_NUM_DATA_BYTES 16
105#define ECC_FORCE_CLK_OPEN 30
106
107
108#define READ_ADDR 0
109
110
111#define READ_START_VLD BIT(0)
112#define READ_STOP_VLD BIT(1)
113#define WRITE_START_VLD BIT(2)
114#define ERASE_START_VLD BIT(3)
115#define SEQ_READ_START_VLD BIT(4)
116
117
118#define NUM_STEPS 0
119
120
121#define ERASED_CW_ECC_MASK 1
122#define AUTO_DETECT_RES 0
123#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
124#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
125#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
126#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
127#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
128
129
130#define PAGE_ALL_ERASED BIT(7)
131#define CODEWORD_ALL_ERASED BIT(6)
132#define PAGE_ERASED BIT(5)
133#define CODEWORD_ERASED BIT(4)
134#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
135#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
136
137
138#define READ_LOCATION_OFFSET 0
139#define READ_LOCATION_SIZE 16
140#define READ_LOCATION_LAST 31
141
142
143#define NAND_VERSION_MAJOR_MASK 0xf0000000
144#define NAND_VERSION_MAJOR_SHIFT 28
145#define NAND_VERSION_MINOR_MASK 0x0fff0000
146#define NAND_VERSION_MINOR_SHIFT 16
147
148
149#define OP_PAGE_READ 0x2
150#define OP_PAGE_READ_WITH_ECC 0x3
151#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
152#define OP_PAGE_READ_ONFI_READ 0x5
153#define OP_PROGRAM_PAGE 0x6
154#define OP_PAGE_PROGRAM_WITH_ECC 0x7
155#define OP_PROGRAM_PAGE_SPARE 0x9
156#define OP_BLOCK_ERASE 0xa
157#define OP_FETCH_ID 0xb
158#define OP_RESET_DEVICE 0xd
159
160
161#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
162 ERASE_START_VLD | SEQ_READ_START_VLD)
163
164
165#define BAM_MODE_EN BIT(0)
166
167
168
169
170
171#define NANDC_STEP_SIZE 512
172
173
174
175
176
177#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
178
179
180#define MAX_REG_RD (3 * MAX_NUM_STEPS)
181
182
183#define ECC_NONE BIT(0)
184#define ECC_RS_4BIT BIT(1)
185#define ECC_BCH_4BIT BIT(2)
186#define ECC_BCH_8BIT BIT(3)
187
188#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
189nandc_set_reg(chip, reg, \
190 ((cw_offset) << READ_LOCATION_OFFSET) | \
191 ((read_size) << READ_LOCATION_SIZE) | \
192 ((is_last_read_loc) << READ_LOCATION_LAST))
193
194#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
195nandc_set_reg(chip, reg, \
196 ((cw_offset) << READ_LOCATION_OFFSET) | \
197 ((read_size) << READ_LOCATION_SIZE) | \
198 ((is_last_read_loc) << READ_LOCATION_LAST))
199
200
201
202
203#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
204
205
206#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
207
208
209#define reg_buf_dma_addr(chip, vaddr) \
210 ((chip)->reg_read_dma + \
211 ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
212
213#define QPIC_PER_CW_CMD_ELEMENTS 32
214#define QPIC_PER_CW_CMD_SGL 32
215#define QPIC_PER_CW_DATA_SGL 8
216
217#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
218
219
220
221
222
223
224#define NAND_BAM_NO_EOT BIT(0)
225
226#define NAND_BAM_NWD BIT(1)
227
228#define NAND_BAM_NEXT_SGL BIT(2)
229
230
231
232
233#define NAND_ERASED_CW_SET BIT(4)
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
/*
 * struct bam_transaction - per-operation bookkeeping for BAM DMA transfers.
 *
 * One allocation (see alloc_bam_transaction()) holds this header plus the
 * trailing command-element and scatterlist arrays that @bam_ce, @cmd_sgl
 * and @data_sgl point into.
 *
 * The *_pos fields are producer indices (next free slot); the *_start
 * fields mark the first slot not yet wrapped into a dmaengine descriptor
 * by prepare_bam_async_desc().
 */
struct bam_transaction {
	struct bam_cmd_element *bam_ce;	/* BAM command elements for register I/O */
	struct scatterlist *cmd_sgl;	/* sgl entries for the command channel */
	struct scatterlist *data_sgl;	/* sgl entries for tx/rx data channels */
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	/*
	 * Set when both a command and a data descriptor carry a completion
	 * callback; the first callback then only clears this flag and the
	 * second one completes @txn_done (see qpic_bam_dma_done()).
	 */
	bool wait_second_completion;
	struct completion txn_done;	/* signalled when the transaction finishes */
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
};
274
275
276
277
278
279
280
281
282
283
284
/*
 * struct desc_info - one prepared DMA descriptor, queued on
 * nandc->desc_list so it can be submitted and later unmapped and freed.
 */
struct desc_info {
	struct list_head node;		/* entry in nandc->desc_list */

	enum dma_data_direction dir;	/* mapping direction, used for unmap */
	union {
		/* ADM controllers: a single embedded scatterlist */
		struct scatterlist adm_sgl;
		/* BAM controllers: a window into the bam_transaction sgl arrays */
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;
};
298
299
300
301
302
/*
 * struct nandc_regs - CPU-side shadow copies of controller registers.
 *
 * Values are staged here (little-endian) by nandc_set_reg() and flushed
 * to the hardware by write_reg_dma(), which relies on consecutive fields
 * being contiguous __le32 words when writing several registers at once.
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	/* original CMD1/VLD contents, restored after e.g. nandc_param() */
	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;
	__le32 read_location_last0;
	__le32 read_location_last1;
	__le32 read_location_last2;
	__le32 read_location_last3;

	/* pre-baked CLR/SET values for NAND_ERASED_CW_DETECT_CFG */
	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
/*
 * struct qcom_nand_controller - driver state for one NAND controller
 * instance; may serve several chips (hosts) on @host_list.
 */
struct qcom_nand_controller {
	struct nand_controller controller;
	struct list_head host_list;	/* attached qcom_nand_host entries */

	struct device *dev;

	void __iomem *base;		/* MMIO mapping of the register block */
	phys_addr_t base_phys;		/* physical base, for BAM command elements */
	dma_addr_t base_dma;		/* DMA address base, for ADM slave config */

	struct clk *core_clk;
	struct clk *aon_clk;

	/* DMA resources; which member is valid depends on props->is_bam */
	union {
		/* BAM: three dedicated channels */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* ADM: one channel plus CRCI flow-control IDs */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;	/* prepared desc_info entries */
	struct bam_transaction *bam_txn;

	u8 *data_buffer;		/* bounce buffer for page data */
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;	/* max codewords/page across all hosts */

	__le32 *reg_read_buf;		/* DMA buffer for register read-back */
	dma_addr_t reg_read_dma;
	int reg_read_pos;		/* next free slot in @reg_read_buf */

	struct nandc_regs *regs;	/* shadow register values */

	u32 cmd1, vld;			/* saved NAND_DEV_CMD1 / _CMD_VLD values */
	const struct qcom_nandc_props *props;
};
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
/*
 * struct qcom_nand_host - per-chip state (one per chip-select).
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;		/* entry in nandc->host_list */

	int cs;				/* chip-select index */
	int cw_size;			/* full codeword size incl. ECC/spare */
	int cw_data;			/* user-data bytes per codeword */
	bool use_ecc;			/* hardware ECC enabled for the operation */
	bool bch_enabled;		/* BCH (vs. Reed-Solomon) engine in use */
	int ecc_bytes_hw;		/* parity bytes generated by hardware */
	int spare_bytes;
	int bbm_size;			/* bad-block-marker byte count */
	u8 status;			/* emulated NAND status byte */
	int last_command;

	/* pre-computed register values: with ECC and raw (ECC disabled) */
	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;		/* value used to clear NAND_FLASH_STATUS */
	u32 clrreadstatus;		/* value used to clear NAND_READ_STATUS */
};
470
471
472
473
474
475
476
477
478
479
/*
 * struct qcom_nandc_props - SoC/IP-specific capabilities, selected via
 * the OF match data.
 */
struct qcom_nandc_props {
	u32 ecc_modes;			/* supported ECC_* mode bitmask */
	bool is_bam;			/* BAM DMA engine (vs. ADM) */
	bool is_qpic;			/* QPIC variant of the controller */
	bool qpic_v2;			/* QPIC v2: separate last-CW read locations */
	u32 dev_cmd_reg_start;		/* offset of the DEV_CMD register group */
};
487
488
489static void free_bam_transaction(struct qcom_nand_controller *nandc)
490{
491 struct bam_transaction *bam_txn = nandc->bam_txn;
492
493 devm_kfree(nandc->dev, bam_txn);
494}
495
496
497static struct bam_transaction *
498alloc_bam_transaction(struct qcom_nand_controller *nandc)
499{
500 struct bam_transaction *bam_txn;
501 size_t bam_txn_size;
502 unsigned int num_cw = nandc->max_cwperpage;
503 void *bam_txn_buf;
504
505 bam_txn_size =
506 sizeof(*bam_txn) + num_cw *
507 ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
508 (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
509 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
510
511 bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
512 if (!bam_txn_buf)
513 return NULL;
514
515 bam_txn = bam_txn_buf;
516 bam_txn_buf += sizeof(*bam_txn);
517
518 bam_txn->bam_ce = bam_txn_buf;
519 bam_txn_buf +=
520 sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
521
522 bam_txn->cmd_sgl = bam_txn_buf;
523 bam_txn_buf +=
524 sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
525
526 bam_txn->data_sgl = bam_txn_buf;
527
528 init_completion(&bam_txn->txn_done);
529
530 return bam_txn;
531}
532
533
534static void clear_bam_transaction(struct qcom_nand_controller *nandc)
535{
536 struct bam_transaction *bam_txn = nandc->bam_txn;
537
538 if (!nandc->props->is_bam)
539 return;
540
541 bam_txn->bam_ce_pos = 0;
542 bam_txn->bam_ce_start = 0;
543 bam_txn->cmd_sgl_pos = 0;
544 bam_txn->cmd_sgl_start = 0;
545 bam_txn->tx_sgl_pos = 0;
546 bam_txn->tx_sgl_start = 0;
547 bam_txn->rx_sgl_pos = 0;
548 bam_txn->rx_sgl_start = 0;
549 bam_txn->last_data_desc = NULL;
550 bam_txn->wait_second_completion = false;
551
552 sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
553 QPIC_PER_CW_CMD_SGL);
554 sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
555 QPIC_PER_CW_DATA_SGL);
556
557 reinit_completion(&bam_txn->txn_done);
558}
559
560
561static void qpic_bam_dma_done(void *data)
562{
563 struct bam_transaction *bam_txn = data;
564
565
566
567
568
569
570
571
572 if (bam_txn->wait_second_completion)
573 bam_txn->wait_second_completion = false;
574 else
575 complete(&bam_txn->txn_done);
576}
577
578static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
579{
580 return container_of(chip, struct qcom_nand_host, chip);
581}
582
583static inline struct qcom_nand_controller *
584get_qcom_nand_controller(struct nand_chip *chip)
585{
586 return container_of(chip->controller, struct qcom_nand_controller,
587 controller);
588}
589
590static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
591{
592 return ioread32(nandc->base + offset);
593}
594
595static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
596 u32 val)
597{
598 iowrite32(val, nandc->base + offset);
599}
600
601static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
602 bool is_cpu)
603{
604 if (!nandc->props->is_bam)
605 return;
606
607 if (is_cpu)
608 dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
609 MAX_REG_RD *
610 sizeof(*nandc->reg_read_buf),
611 DMA_FROM_DEVICE);
612 else
613 dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
614 MAX_REG_RD *
615 sizeof(*nandc->reg_read_buf),
616 DMA_FROM_DEVICE);
617}
618
/*
 * offset_to_nandc_reg() - map a hardware register offset to its shadow
 * copy inside struct nandc_regs.
 *
 * Returns NULL for offsets without a shadow register; callers
 * (nandc_set_reg(), write_reg_dma()) must handle that.
 */
static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		/* writes to FLASH_STATUS are the "clear status" value */
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		/* pseudo-offset: saved CMD1, restored after nandc_param() */
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		/* pseudo-offset: saved CMD_VLD */
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	case NAND_READ_LOCATION_LAST_CW_0:
		return &regs->read_location_last0;
	case NAND_READ_LOCATION_LAST_CW_1:
		return &regs->read_location_last1;
	case NAND_READ_LOCATION_LAST_CW_2:
		return &regs->read_location_last2;
	case NAND_READ_LOCATION_LAST_CW_3:
		return &regs->read_location_last3;
	default:
		return NULL;
	}
}
672
673static void nandc_set_reg(struct nand_chip *chip, int offset,
674 u32 val)
675{
676 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
677 struct nandc_regs *regs = nandc->regs;
678 __le32 *reg;
679
680 reg = offset_to_nandc_reg(regs, offset);
681
682 if (reg)
683 *reg = cpu_to_le32(val);
684}
685
686
687static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
688{
689 return cw == (ecc->steps - 1);
690}
691
692
693static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
694 int cw_offset, int read_size, int is_last_read_loc)
695{
696 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
697 struct nand_ecc_ctrl *ecc = &chip->ecc;
698 int reg_base = NAND_READ_LOCATION_0;
699
700 if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
701 reg_base = NAND_READ_LOCATION_LAST_CW_0;
702
703 reg_base += reg * 4;
704
705 if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
706 return nandc_set_read_loc_last(chip, reg_base, cw_offset,
707 read_size, is_last_read_loc);
708 else
709 return nandc_set_read_loc_first(chip, reg_base, cw_offset,
710 read_size, is_last_read_loc);
711}
712
713
714static void set_address(struct qcom_nand_host *host, u16 column, int page)
715{
716 struct nand_chip *chip = &host->chip;
717
718 if (chip->options & NAND_BUSWIDTH_16)
719 column >>= 1;
720
721 nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
722 nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
723}
724
725
726
727
728
729
730
731
732
/*
 * update_rw_regs() - stage the shadow registers for a page read or
 * page program covering @num_cw codewords.
 * @host:   NAND host
 * @num_cw: number of codewords to transfer
 * @read:   true for a read, false for a program
 * @cw:     codeword index, used to pick the read-location register bank
 *
 * Only fills nandc->regs; the values reach the hardware later through
 * write_reg_dma().
 */
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
	struct nand_chip *chip = &host->chip;
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (read) {
		if (host->use_ecc)
			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		/* patch the codeword count into the pre-computed CFG0 */
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		/* raw access: use the ECC-disabled register set */
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
	nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	if (!nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
776
777
778
779
780
781
/*
 * prepare_bam_async_desc() - wrap all scatterlist entries queued on
 * @chan since the last call into one dmaengine descriptor.
 *
 * Picks the pending [start, pos) window of the command or data sgl array
 * (depending on @chan), maps it, prepares a slave-sg descriptor with
 * @flags and appends the resulting desc_info to nandc->desc_list for
 * later submission by submit_descs().
 *
 * Returns 0 on success or a negative errno.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* select the pending window and consume it (start = pos) */
	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/*
	 * remember the last descriptor per channel class so submit_descs()
	 * can attach the completion callbacks
	 */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
851
852
853
854
855
856
857
858
859
860
/*
 * prep_bam_dma_desc_cmd() - queue BAM command elements that read or
 * write @size consecutive controller registers starting at @reg_off.
 * @read:  true to read registers into the reg_read buffer, false to
 *         write shadow values at @vaddr to the hardware
 * @vaddr: CPU address of the register values (source for writes,
 *         used to compute the DMA destination for reads)
 * @flags: NAND_BAM_* flags; NAND_BAM_NEXT_SGL closes the current
 *         command-element run into a cmd_sgl entry, NAND_BAM_NWD
 *         additionally prepares the command descriptor immediately
 *
 * Returns 0 on success or a negative errno.
 */
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* one command element per 32-bit register */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* close the accumulated CE run into a command sgl entry */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
911
912
913
914
915
/*
 * prep_bam_dma_desc_data() - queue a data buffer on the BAM rx (@read)
 * or tx data channel.
 *
 * For writes, unless NAND_BAM_NO_EOT is set, the pending tx entries are
 * immediately wrapped into a descriptor so the transfer ends with EOT.
 *
 * Returns 0 on success or a negative errno.
 */
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * close the tx descriptor now unless the caller wants to
		 * batch more buffers before EOT
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}
946
947static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
948 int reg_off, const void *vaddr, int size,
949 bool flow_control)
950{
951 struct desc_info *desc;
952 struct dma_async_tx_descriptor *dma_desc;
953 struct scatterlist *sgl;
954 struct dma_slave_config slave_conf;
955 enum dma_transfer_direction dir_eng;
956 int ret;
957
958 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
959 if (!desc)
960 return -ENOMEM;
961
962 sgl = &desc->adm_sgl;
963
964 sg_init_one(sgl, vaddr, size);
965
966 if (read) {
967 dir_eng = DMA_DEV_TO_MEM;
968 desc->dir = DMA_FROM_DEVICE;
969 } else {
970 dir_eng = DMA_MEM_TO_DEV;
971 desc->dir = DMA_TO_DEVICE;
972 }
973
974 ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
975 if (ret == 0) {
976 ret = -ENOMEM;
977 goto err;
978 }
979
980 memset(&slave_conf, 0x00, sizeof(slave_conf));
981
982 slave_conf.device_fc = flow_control;
983 if (read) {
984 slave_conf.src_maxburst = 16;
985 slave_conf.src_addr = nandc->base_dma + reg_off;
986 slave_conf.slave_id = nandc->data_crci;
987 } else {
988 slave_conf.dst_maxburst = 16;
989 slave_conf.dst_addr = nandc->base_dma + reg_off;
990 slave_conf.slave_id = nandc->cmd_crci;
991 }
992
993 ret = dmaengine_slave_config(nandc->chan, &slave_conf);
994 if (ret) {
995 dev_err(nandc->dev, "failed to configure dma channel\n");
996 goto err;
997 }
998
999 dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
1000 if (!dma_desc) {
1001 dev_err(nandc->dev, "failed to prepare desc\n");
1002 ret = -EINVAL;
1003 goto err;
1004 }
1005
1006 desc->dma_desc = dma_desc;
1007
1008 list_add_tail(&desc->node, &nandc->desc_list);
1009
1010 return 0;
1011err:
1012 kfree(desc);
1013
1014 return ret;
1015}
1016
1017
1018
1019
1020
1021
1022
1023
1024
/*
 * read_reg_dma() - queue a DMA read of @num_regs consecutive registers,
 * starting at @first, into nandc->reg_read_buf.
 *
 * reg_read_pos advances so successive calls land in consecutive buffer
 * slots; caller fetches results after submit_descs().
 *
 * Returns 0 on success or a negative errno.
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	/* DEV_CMD registers live at a variant-specific base */
	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	/* ADM needs CRCI flow control for these status-style reads */
	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
1047
1048
1049
1050
1051
1052
1053
1054
1055
/*
 * write_reg_dma() - queue a DMA write of @num_regs shadow register
 * values (contiguous fields of nandc->regs, starting at the one that
 * shadows @first) to the hardware.
 *
 * Returns 0 on success or a negative errno.
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	/* ERASED_CW_DETECT_CFG has two pre-baked values: set vs. clear */
	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	/* EXEC triggers the operation; fence it with "no write device" */
	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	/* RESTORE pseudo-offsets target the real CMD1/VLD registers */
	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	/* ADM needs CRCI flow control when writing the command register */
	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
1102 const u8 *vaddr, int size, unsigned int flags)
1103{
1104 if (nandc->props->is_bam)
1105 return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
1106
1107 return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
1108}
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
1120 const u8 *vaddr, int size, unsigned int flags)
1121{
1122 if (nandc->props->is_bam)
1123 return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
1124
1125 return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
1126}
1127
1128
1129
1130
1131
/*
 * config_nand_page_read() - queue the per-page register writes that
 * precede reading any codewords: address, config, ECC buffer config,
 * and the erased-codeword detection clear/set sequence.
 */
static void config_nand_page_read(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	if (!nandc->props->qpic_v2)
		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	/* pulse erased-CW detection: clear, then arm */
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
1144
1145
1146
1147
1148
/*
 * config_nand_cw_read() - queue the per-codeword register writes to
 * trigger a codeword read, plus the status register read-backs.
 * @use_ecc: also fetch buffer status and erased-CW detection status
 */
static void
config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	int reg = NAND_READ_LOCATION_0;

	/* QPIC v2 uses a dedicated register bank for the last codeword */
	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
		reg = NAND_READ_LOCATION_LAST_CW_0;

	if (nandc->props->is_bam)
		write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		/* FLASH_STATUS + BUFFER_STATUS, then erased-CW status */
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}
1174
1175
1176
1177
1178
/*
 * config_nand_single_cw_page_read() - queue the registers for an
 * operation that reads exactly one codeword (e.g. nandc_param()).
 */
static void
config_nand_single_cw_page_read(struct nand_chip *chip,
				bool use_ecc, int cw)
{
	config_nand_page_read(chip);
	config_nand_cw_read(chip, use_ecc, cw);
}
1186
1187
1188
1189
1190
/*
 * config_nand_page_write() - queue the per-page register writes that
 * precede programming any codewords of a page.
 */
static void config_nand_page_write(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	if (!nandc->props->qpic_v2)
		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
			      NAND_BAM_NEXT_SGL);
}
1201
1202
1203
1204
1205
/*
 * config_nand_cw_write() - queue the per-codeword register writes to
 * trigger programming one codeword, read back FLASH_STATUS, then queue
 * the status-clear writes.
 */
static void config_nand_cw_write(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	/* clear sticky status bits for the next codeword */
	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
1218
1219
1220
1221
1222
1223
1224
/*
 * nandc_param() - queue the register/DMA operations that read the ONFI
 * parameter page into nandc->data_buffer.
 *
 * The controller is temporarily reconfigured for a single raw 512-byte
 * codeword with ECC disabled; on pre-v2 parts CMD1/CMD_VLD are also
 * patched so the PAGE_READ issues an ONFI PARAM command, and restored
 * afterwards from the values saved in NAND_DEV_CMD{1,_VLD}_RESTORE.
 *
 * Only queues descriptors; the caller must run submit_descs().
 */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * QPIC v2 has a dedicated ONFI-read opcode; older parts reuse
	 * PAGE_READ with a patched command register (below).
	 */
	if (nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ |
			      PAGE_ACC | LAST_PAGE);
	else
		nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ |
			      PAGE_ACC | LAST_PAGE);

	nandc_set_reg(chip, NAND_ADDR0, 0);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	/* one raw 512-byte codeword, no spare, ECC off */
	nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	if (!nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* patch CMD1/VLD so the PAGE_READ issues the ONFI PARAM command */
	if (!nandc->props->qpic_v2) {
		nandc_set_reg(chip, NAND_DEV_CMD_VLD,
			      (nandc->vld & ~READ_START_VLD));
		nandc_set_reg(chip, NAND_DEV_CMD1,
			      (nandc->cmd1 & ~(0xFF << READ_ADDR))
			      | NAND_CMD_PARAM << READ_ADDR);
	}

	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	/* stage the original values for restoration after the read */
	if (!nandc->props->qpic_v2) {
		nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
		nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	}

	nandc_set_read_loc(chip, 0, 0, 0, 512, 1);

	if (!nandc->props->qpic_v2) {
		write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
		write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
	}

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(chip, false, 0);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD after the transfer */
	if (!nandc->props->qpic_v2) {
		write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
		write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
	}

	return 0;
}
1297
1298
/*
 * erase_block() - queue the register/DMA operations to erase the block
 * containing @page_addr. Uses the raw (ECC-disabled) configuration and
 * reads back FLASH_STATUS for the caller to check.
 *
 * Only queues descriptors; the caller must run submit_descs().
 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(chip, NAND_FLASH_CMD,
		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(chip, NAND_ADDR0, page_addr);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	nandc_set_reg(chip, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);

	/* CMD/ADDR0/ADDR1 are consecutive shadow registers */
	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	/* clear sticky status bits afterwards */
	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1326
1327
/*
 * read_id() - queue the register/DMA operations for a FETCH_ID, leaving
 * the device ID in the NAND_READ_ID register read-back slot.
 *
 * A column of -1 is a no-op. Only queues descriptors; the caller must
 * run submit_descs().
 */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(chip, NAND_FLASH_CMD, OP_FETCH_ID);
	nandc_set_reg(chip, NAND_ADDR0, column);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	/* ADM controllers need the data-mover enable bit here */
	nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	/* CMD/ADDR0/ADDR1/CHIP_SELECT are consecutive shadow registers */
	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1350
1351
/*
 * reset() - queue the register/DMA operations for a RESET_DEVICE
 * command, reading back FLASH_STATUS afterwards.
 *
 * Only queues descriptors; the caller must run submit_descs().
 */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(chip, NAND_FLASH_CMD, OP_RESET_DEVICE);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1367
1368
/*
 * submit_descs - submit all queued DMA descriptors and wait for completion.
 * @nandc: NAND controller
 *
 * For BAM: close any scatterlist still being filled on each channel,
 * submit everything, install completion callbacks on the last command
 * descriptor (and the last data descriptor, if any), then wait on
 * txn_done with a timeout. For ADM: poll the last submitted cookie.
 *
 * Returns 0 on success, a prepare_bam_async_desc() error, or -ETIMEDOUT.
 */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		/* finalize the partially-built sgl on each active channel */
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		/*
		 * Completion is signalled via the last command descriptor;
		 * when data was also transferred, wait_second_completion
		 * arms a second callback on the last data descriptor
		 * (handled in qpic_bam_dma_done, not visible here).
		 */
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}
1424
/*
 * free_descs - unmap and free every descriptor queued on the controller.
 * @nandc: NAND controller
 *
 * BAM descriptors carry a scatterlist array; ADM descriptors a single
 * embedded sg entry. Called after submit_descs() (success or failure).
 */
static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}
1442
1443
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}
1449
/*
 * pre_command - reset per-command driver state before executing @command.
 * @host: NAND host
 * @command: the legacy NAND command about to run
 */
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	/* these commands build and submit a fresh BAM transaction here */
	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}
1466
1467
1468
1469
1470
1471
/*
 * parse_erase_write_errors - fold program/erase status into host->status.
 * @host: NAND host
 * @command: NAND_CMD_PAGEPROG or NAND_CMD_ERASE1
 *
 * Inspects the FLASH_STATUS value read back per codeword; a subsequent
 * NAND_CMD_STATUS byte read reports the accumulated result.
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	/* an erase is a single-codeword op; a program touches every codeword */
	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		/* MPU error: report the device as write-protected */
		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		/* op error on any cw, or device status error on the last cw */
		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
1495
/*
 * post_command - per-command post-processing once descriptors completed.
 * @host: NAND host
 * @command: the legacy NAND command that just ran
 *
 * READID: copy the ID bytes from the register read buffer into the data
 * buffer that qcom_nandc_read_byte()/read_buf() serve from.
 * PAGEPROG/ERASE1: decode status into host->status.
 */
static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}
1515
1516
1517
1518
1519
1520
1521
/*
 * qcom_nandc_command - legacy ->cmdfunc() implementation.
 * @chip: NAND chip
 * @command: legacy NAND command
 * @column: column address (or -1)
 * @page_addr: page address (or -1)
 *
 * RESET/READID/PARAM/ERASE1 queue and submit their DMA descriptors here
 * (wait = true). READ0/SEQIN only latch the address and configure the
 * read/write registers; the actual transfer happens in the subsequent
 * page read/write callback. Remaining commands are no-ops here.
 */
static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
			       int column, int page_addr)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		/* four ID bytes are served back through read_byte() */
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* only full-page, ECC-protected reads are supported here */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true, 0);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
1608{
1609 u8 empty1, empty2;
1610
1611
1612
1613
1614
1615
1616
1617 empty1 = data_buf[3];
1618 empty2 = data_buf[175];
1619
1620
1621
1622
1623
1624 if ((empty1 == 0x54 && empty2 == 0xff) ||
1625 (empty1 == 0xff && empty2 == 0x54)) {
1626 data_buf[3] = 0xff;
1627 data_buf[175] = 0xff;
1628 }
1629
1630
1631
1632
1633
1634 if (memchr_inv(data_buf, 0xff, data_len)) {
1635 data_buf[3] = empty1;
1636 data_buf[175] = empty2;
1637
1638 return false;
1639 }
1640
1641 return true;
1642}
1643
/*
 * Per-codeword status words read back via DMA after each codeword:
 * NAND_FLASH_STATUS, NAND_BUFFER_STATUS, NAND_ERASED_CW_DETECT_STATUS.
 */
struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};
1649
1650
/*
 * check_flash_errors - scan per-codeword FLASH_STATUS for failures.
 * @host: NAND host
 * @cw_cnt: number of codeword status words to inspect
 *
 * Returns 0 if no operational/MPU error was reported, -EIO otherwise.
 */
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}
1668
1669
/*
 * qcom_nandc_read_cw_raw - read one codeword raw (ECC bytes included).
 * @mtd: MTD device
 * @chip: NAND chip
 * @data_buf: destination for the codeword's data bytes
 * @oob_buf: destination for the codeword's OOB bytes
 * @page: page to read from
 * @cw: codeword index within the page
 *
 * The raw codeword is split into data1 | oob1 (BBM) | data2 | oob2 so the
 * bad-block marker bytes land in the OOB buffer. Returns 0 on success or
 * a negative error from the DMA submission / flash status check.
 */
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
	int raw_cw = cw;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	/* QPIC v2 addresses the raw codeword via the last-cw index */
	if (nandc->props->qpic_v2)
		raw_cw = ecc->steps - 1;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true, raw_cw);
	config_nand_page_read(chip);

	/* data1 ends where the BBM bytes sit within this codeword */
	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (qcom_nandc_is_last_cw(ecc, cw)) {
		/* last cw carries the per-step 4-byte user OOB as well */
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		/* program the four read-location windows for this cw */
		nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(chip, cw, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(chip, cw, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(chip, false, raw_cw);

	/* drain the controller's page buffer in the same four segments */
	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
/*
 * check_for_erased_page - re-read uncorrectable codewords raw and test
 * whether they are in fact erased pages with bitflips.
 * @host: NAND host
 * @data_buf: page data buffer (may be NULL; a bounce buffer is used)
 * @oob_buf: page OOB buffer (may be NULL; chip->oob_poi is used)
 * @uncorrectable_cws: bitmask of codewords the ECC engine gave up on
 * @page: page that was read
 * @max_bitflips: running maximum from the ECC-corrected codewords
 *
 * Returns the updated max_bitflips, or a negative error from the raw
 * read. Truly uncorrectable codewords bump mtd->ecc_stats.failed.
 */
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
		      u8 *oob_buf, unsigned long uncorrectable_cws,
		      int page, unsigned int max_bitflips)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *cw_data_buf, *cw_oob_buf;
	int cw, data_size, oob_size, ret = 0;

	if (!data_buf)
		data_buf = nand_get_data_buf(chip);

	if (!oob_buf) {
		nand_get_data_buf(chip);
		oob_buf = chip->oob_poi;
	}

	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
		if (qcom_nandc_is_last_cw(ecc, cw)) {
			data_size = ecc->size - ((ecc->steps - 1) * 4);
			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw;
		}

		/* position of this codeword within the page buffers */
		cw_data_buf = data_buf + (cw * host->cw_data);
		cw_oob_buf = oob_buf + (cw * ecc->bytes);

		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
					     cw_oob_buf, page, cw);
		if (ret)
			return ret;

		/*
		 * Let the generic helper decide whether data+ECC look like
		 * an erased chunk with at most ecc->strength bitflips; the
		 * BBM bytes at the start of the OOB are skipped.
		 */
		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
						  cw_oob_buf + host->bbm_size,
						  oob_size, NULL,
						  0, ecc->strength);
		if (ret < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}
	}

	return max_bitflips;
}
1811
1812
1813
1814
1815
/*
 * parse_read_errors - decode per-codeword read status after a page read.
 * @host: NAND host
 * @data_buf: page data buffer (NULL for OOB-only reads)
 * @oob_buf: page OOB buffer (NULL when OOB was not read)
 * @page: page that was read
 *
 * Walks the read_stats triple captured for each codeword, classifying it
 * as: uncorrectable (possibly erased), operational failure, or corrected
 * with N bitflips. Returns max bitflips, -EIO on an operational error,
 * or defers suspected-erased codewords to check_for_erased_page().
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
	struct read_stats *buf;
	bool flash_op_err = false, erased;
	int i;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* the last cw also carries the per-step 4-byte user OOB */
		if (qcom_nandc_is_last_cw(ecc, i)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		/* uncorrectable: might still be an erased codeword */
		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			/*
			 * BCH has a hardware erased-cw detector; the RS
			 * path has to scan the data buffer instead (with
			 * the 0x54 quirk fix-up). No data buffer means we
			 * cannot tell, so treat it as uncorrectable.
			 */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW;
			} else if (data_buf) {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			} else {
				erased = false;
			}

			if (!erased)
				uncorrectable_cws |= BIT(i);

		/* operational/MPU error: the whole read failed */
		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;

		/* clean read: account the corrected bitflips */
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		return max_bitflips;

	return check_for_erased_page(host, data_buf_start, oob_buf_start,
				     uncorrectable_cws, page,
				     max_bitflips);
}
1914
1915
1916
1917
1918
/*
 * read_page_ecc - ECC-protected read of a page's data and/or OOB.
 * @host: NAND host
 * @data_buf: page data destination (NULL to skip data)
 * @oob_buf: OOB destination (NULL to skip OOB)
 * @page: page to read
 *
 * Queues one codeword read per ECC step, submits, then decodes the
 * status words via parse_read_errors(). Returns max bitflips or a
 * negative error.
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
	int i, ret;

	config_nand_page_read(chip);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (qcom_nandc_is_last_cw(ecc, i)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			/* set read locations for whichever buffers exist */
			if (data_buf && oob_buf) {
				nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
				nandc_set_read_loc(chip, i, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(chip, i, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(chip, i, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(chip, true, i);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * The controller reserves the BBM position(s) in the OOB;
		 * fill them with 0xff in the caller's buffer and read the
		 * remaining OOB bytes from just past the data area.
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to read page/oob\n");
		return ret;
	}

	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
1995
1996
1997
1998
1999
/*
 * copy_last_cw - read the last codeword of @page into nandc->data_buffer.
 * @host: NAND host
 * @page: page to read from
 *
 * Reads with or without ECC depending on host->use_ecc; used by the
 * bad-block helpers, which inspect the BBM in the last codeword region.
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true, ecc->steps - 1);

	config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
2030
2031
/* implements ecc->read_page(): ECC-protected read of a full page */
static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	clear_bam_transaction(nandc);

	return read_page_ecc(host, data_buf, oob_buf, page);
}
2047
2048
2049static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
2050 int oob_required, int page)
2051{
2052 struct mtd_info *mtd = nand_to_mtd(chip);
2053 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2054 struct nand_ecc_ctrl *ecc = &chip->ecc;
2055 int cw, ret;
2056 u8 *data_buf = buf, *oob_buf = chip->oob_poi;
2057
2058 for (cw = 0; cw < ecc->steps; cw++) {
2059 ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
2060 page, cw);
2061 if (ret)
2062 return ret;
2063
2064 data_buf += host->cw_data;
2065 oob_buf += ecc->bytes;
2066 }
2067
2068 return 0;
2069}
2070
2071
/* implements ecc->read_oob(): full ECC page read, keeping only the OOB */
static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true, 0);

	/* NULL data buffer: read_page_ecc() only fills chip->oob_poi */
	return read_page_ecc(host, NULL, chip->oob_poi, page);
}
2087
2088
/*
 * implements ecc->write_page(): ECC-protected program of a full page.
 * Only the last codeword's OOB user bytes are written; the controller
 * generates the parity bytes itself.
 */
static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false, 0);
	config_nand_page_write(chip);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (qcom_nandc_is_last_cw(ecc, i)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		/* keep the EOT open on the last cw: its OOB follows below */
		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * Only the last codeword's OOB is programmed; the BBM
		 * byte(s) at the start of oob_poi are skipped so the
		 * factory marker area is not overwritten.
		 */
		if (qcom_nandc_is_last_cw(ecc, i)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(chip);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
2157
2158
/*
 * implements ecc->write_page_raw(): program a page with the ECC engine
 * bypassed. Each codeword is written in the raw data1|oob1|data2|oob2
 * layout that qcom_nandc_read_cw_raw() reads back.
 */
static int qcom_nandc_write_page_raw(struct nand_chip *chip,
				     const uint8_t *buf, int oob_required,
				     int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false, 0);
	config_nand_page_write(chip);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		/* data1 ends where the BBM bytes sit within the codeword */
		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (qcom_nandc_is_last_cw(ecc, i)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		/* NO_EOT keeps the BAM transfer open across the segments */
		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(chip);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
2230
2231
2232
2233
2234
2235
2236
2237
/*
 * implements ecc->write_oob(): program only the OOB of a page.
 *
 * The available user OOB bytes all live in the last codeword, so a
 * single-codeword write is issued: the data area is padded with 0xff and
 * the user bytes are packed immediately after it.
 */
static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	host->use_ecc = true;
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	memset(nandc->data_buffer, 0xff, host->cw_data);

	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	/*
	 * NOTE(review): the last-cw argument here is 0, while
	 * qcom_nandc_block_markbad() passes ecc->steps - 1 for the same
	 * single-codeword write of the last codeword — confirm whether
	 * this asymmetry is intentional.
	 */
	update_rw_regs(host, 1, false, 0);

	config_nand_page_write(chip);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(chip);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
2279
/*
 * implements chip->legacy.block_bad(): check the factory bad-block
 * marker, which sits in the last codeword's region, by reading that
 * codeword raw (ECC off) and testing the BBM byte(s) against 0xff.
 * Returns non-zero if the block is bad; read errors report "good".
 */
static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* raw read so the controller does not relocate/correct the BBM */
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	if (check_flash_errors(host, 1)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	/* BBM offset inside the last codeword's raw data */
	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	/* 16-bit devices carry a two-byte marker */
	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}
2317
/*
 * implements chip->legacy.block_markbad(): mark a block bad by
 * programming the entire last codeword of its first page with zeros,
 * ECC disabled, so the BBM position reads back non-0xff.
 */
static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/* zero out the whole raw last codeword, BBM included */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare a raw (ECC-off) single-codeword write to the last cw */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false, ecc->steps - 1);

	config_nand_page_write(chip);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(chip);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
2358
2359
2360
2361
2362
2363
2364
2365static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
2366{
2367 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2368 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2369 u8 *buf = nandc->data_buffer;
2370 u8 ret = 0x0;
2371
2372 if (host->last_command == NAND_CMD_STATUS) {
2373 ret = host->status;
2374
2375 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2376
2377 return ret;
2378 }
2379
2380 if (nandc->buf_start < nandc->buf_count)
2381 ret = buf[nandc->buf_start++];
2382
2383 return ret;
2384}
2385
2386static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
2387{
2388 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2389 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2390
2391 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2392 nandc->buf_start += real_len;
2393}
2394
2395static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
2396 int len)
2397{
2398 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2399 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2400
2401 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2402
2403 nandc->buf_start += real_len;
2404}
2405
2406
2407static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
2408{
2409 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2410
2411 if (chipnr <= 0)
2412 return;
2413
2414 dev_warn(nandc->dev, "invalid chip select\n");
2415}
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2503 struct mtd_oob_region *oobregion)
2504{
2505 struct nand_chip *chip = mtd_to_nand(mtd);
2506 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2507 struct nand_ecc_ctrl *ecc = &chip->ecc;
2508
2509 if (section > 1)
2510 return -ERANGE;
2511
2512 if (!section) {
2513 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2514 host->bbm_size;
2515 oobregion->offset = 0;
2516 } else {
2517 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2518 oobregion->offset = mtd->oobsize - oobregion->length;
2519 }
2520
2521 return 0;
2522}
2523
2524static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2525 struct mtd_oob_region *oobregion)
2526{
2527 struct nand_chip *chip = mtd_to_nand(mtd);
2528 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2529 struct nand_ecc_ctrl *ecc = &chip->ecc;
2530
2531 if (section)
2532 return -ERANGE;
2533
2534 oobregion->length = ecc->steps * 4;
2535 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2536
2537 return 0;
2538}
2539
/* OOB layout callbacks registered via mtd_set_ooblayout() */
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};
2544
/* ECC parity bytes per step: 12 for 4-bit strength, 16 otherwise (8-bit) */
static int
qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
	if (strength == 4)
		return 12;

	return 16;
}
/* supported ECC configurations: NANDC_STEP_SIZE steps, strength 4 or 8 */
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
		     NANDC_STEP_SIZE, 4, 8);
2552
/*
 * qcom_nand_attach_chip - controller ->attach_chip() hook.
 * @chip: NAND chip being attached
 *
 * Chooses the ECC configuration for the detected chip, derives the
 * per-codeword byte layout (ECC parity / spare / BBM sizes) and
 * precomputes the CFG0/CFG1/ECC register values used for both
 * ECC-protected and raw accesses. Returns 0 or a negative error.
 */
static int qcom_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte, ret;
	bool wide_bus;
	int ecc_mode = 1;

	/* the ECC step size is fixed at NANDC_STEP_SIZE */
	ecc->size = NANDC_STEP_SIZE;
	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
	cwperpage = mtd->writesize / NANDC_STEP_SIZE;

	/*
	 * 4 OOB bytes per codeword are reserved for controller use, so
	 * they are excluded from the budget offered to the ECC chooser.
	 */
	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
				   mtd->oobsize - (cwperpage * 4));
	if (ret) {
		dev_err(nandc->dev, "No valid ECC settings possible\n");
		return ret;
	}

	if (ecc->strength >= 8) {
		/* 8-bit strength always uses the BCH engine */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * 4-bit strength: BCH if the controller variant supports
		 * it, otherwise the Reed-Solomon engine.
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH 4-bit */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS 4-bit */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * Total OOB bytes the controller consumes per codeword:
	 * parity + spare + bad-block-marker reservation.
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	/* controller-wide maximum, across all attached chips */
	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * ECC-protected user data per codeword: 512 data bytes plus the
	 * 4 extra bytes the controller covers with ECC.
	 */
	host->cw_data = 516;

	/* raw codeword size: protected data plus all per-cw OOB bytes */
	host->cw_size = host->cw_data + ecc->bytes;
	/* BBM position within the last codeword (register is 1-based) */
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	/* CFG0/CFG1 for ECC-protected accesses */
	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	/* CFG0/CFG1 for raw (ECC-disabled) accesses */
	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	/* the ECC buffer config register only exists before QPIC v2 */
	if (!nandc->props->qpic_v2)
		host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
2718
/* Controller-level hooks invoked by the raw NAND core during nand_scan(). */
static const struct nand_controller_ops qcom_nandc_ops = {
	.attach_chip = qcom_nand_attach_chip,
};
2722
2723static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2724{
2725 if (nandc->props->is_bam) {
2726 if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2727 dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2728 MAX_REG_RD *
2729 sizeof(*nandc->reg_read_buf),
2730 DMA_FROM_DEVICE);
2731
2732 if (nandc->tx_chan)
2733 dma_release_channel(nandc->tx_chan);
2734
2735 if (nandc->rx_chan)
2736 dma_release_channel(nandc->rx_chan);
2737
2738 if (nandc->cmd_chan)
2739 dma_release_channel(nandc->cmd_chan);
2740 } else {
2741 if (nandc->chan)
2742 dma_release_channel(nandc->chan);
2743 }
2744}
2745
/*
 * Allocate everything the controller needs before chips are probed: DMA
 * mask, bounce buffer, register-operation scratch area, register read-back
 * buffer, and the DMA channels (three named BAM channels, or one ADM
 * "rxtx" channel). On failure all partially acquired resources are
 * released via qcom_nandc_unalloc() (devm allocations are left to devm).
 */
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * Scratch buffer sized for one codeword: 516 data bytes (cw_data in
	 * qcom_nand_attach_chip) + 16 bytes — presumably spare/ECC bytes of
	 * the largest supported layout; NOTE(review): confirm 532 still
	 * covers the worst-case codeword for all supported ECC modes.
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	/* Staging area for register reads/writes built into DMA descriptors. */
	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	/* Buffer the controller DMAs register read-back values into. */
	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
					   MAX_REG_RD, sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		/* Map the read-back buffer once; unmapped in unalloc(). */
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
		if (IS_ERR(nandc->tx_chan)) {
			ret = PTR_ERR(nandc->tx_chan);
			/* NULL the member so unalloc() skips releasing it. */
			nandc->tx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "tx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
		if (IS_ERR(nandc->rx_chan)) {
			ret = PTR_ERR(nandc->rx_chan);
			nandc->rx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
		if (IS_ERR(nandc->cmd_chan)) {
			ret = PTR_ERR(nandc->cmd_chan);
			nandc->cmd_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "cmd DMA channel request failed\n");
			goto unalloc;
		}

		/*
		 * Initially allocate a BAM transaction sized for a single
		 * codeword; after the chips are scanned it is freed and
		 * reallocated with the real max codewords-per-page (see
		 * qcom_nand_host_init_and_register()).
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			ret = -ENOMEM;
			goto unalloc;
		}
	} else {
		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
		if (IS_ERR(nandc->chan)) {
			ret = PTR_ERR(nandc->chan);
			nandc->chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rxtx DMA channel request failed\n");
			return ret;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_controller_init(&nandc->controller);
	nandc->controller.ops = &qcom_nandc_ops;

	return 0;
unalloc:
	qcom_nandc_unalloc(nandc);
	return ret;
}
2854
2855
2856static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2857{
2858 u32 nand_ctrl;
2859
2860
2861 if (!nandc->props->is_qpic)
2862 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
2863
2864 if (!nandc->props->qpic_v2)
2865 nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
2866 NAND_DEV_CMD_VLD_VAL);
2867
2868
2869 if (nandc->props->is_bam) {
2870 nand_ctrl = nandc_read(nandc, NAND_CTRL);
2871
2872
2873
2874
2875
2876
2877
2878
2879 if (!(nand_ctrl & BAM_MODE_EN))
2880 nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
2881 } else {
2882 nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2883 }
2884
2885
2886 if (!nandc->props->qpic_v2) {
2887 nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
2888 nandc->vld = NAND_DEV_CMD_VLD_VAL;
2889 }
2890
2891 return 0;
2892}
2893
/* Partition parsers tried in order when registering each MTD device. */
static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };
2895
2896static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2897 struct qcom_nand_host *host,
2898 struct device_node *dn)
2899{
2900 struct nand_chip *chip = &host->chip;
2901 struct mtd_info *mtd = nand_to_mtd(chip);
2902 struct device *dev = nandc->dev;
2903 int ret;
2904
2905 ret = of_property_read_u32(dn, "reg", &host->cs);
2906 if (ret) {
2907 dev_err(dev, "can't get chip-select\n");
2908 return -ENXIO;
2909 }
2910
2911 nand_set_flash_node(chip, dn);
2912 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2913 if (!mtd->name)
2914 return -ENOMEM;
2915
2916 mtd->owner = THIS_MODULE;
2917 mtd->dev.parent = dev;
2918
2919 chip->legacy.cmdfunc = qcom_nandc_command;
2920 chip->legacy.select_chip = qcom_nandc_select_chip;
2921 chip->legacy.read_byte = qcom_nandc_read_byte;
2922 chip->legacy.read_buf = qcom_nandc_read_buf;
2923 chip->legacy.write_buf = qcom_nandc_write_buf;
2924 chip->legacy.set_features = nand_get_set_features_notsupp;
2925 chip->legacy.get_features = nand_get_set_features_notsupp;
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935 chip->legacy.block_bad = qcom_nandc_block_bad;
2936 chip->legacy.block_markbad = qcom_nandc_block_markbad;
2937
2938 chip->controller = &nandc->controller;
2939 chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
2940 NAND_SKIP_BBTSCAN;
2941
2942
2943 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2944
2945 ret = nand_scan(chip, 1);
2946 if (ret)
2947 return ret;
2948
2949 if (nandc->props->is_bam) {
2950 free_bam_transaction(nandc);
2951 nandc->bam_txn = alloc_bam_transaction(nandc);
2952 if (!nandc->bam_txn) {
2953 dev_err(nandc->dev,
2954 "failed to allocate bam transaction\n");
2955 nand_cleanup(chip);
2956 return -ENOMEM;
2957 }
2958 }
2959
2960 ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
2961 if (ret)
2962 nand_cleanup(chip);
2963
2964 return ret;
2965}
2966
2967static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2968{
2969 struct device *dev = nandc->dev;
2970 struct device_node *dn = dev->of_node, *child;
2971 struct qcom_nand_host *host;
2972 int ret = -ENODEV;
2973
2974 for_each_available_child_of_node(dn, child) {
2975 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2976 if (!host) {
2977 of_node_put(child);
2978 return -ENOMEM;
2979 }
2980
2981 ret = qcom_nand_host_init_and_register(nandc, host, child);
2982 if (ret) {
2983 devm_kfree(dev, host);
2984 continue;
2985 }
2986
2987 list_add_tail(&host->node, &nandc->host_list);
2988 }
2989
2990 return ret;
2991}
2992
2993
2994static int qcom_nandc_parse_dt(struct platform_device *pdev)
2995{
2996 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2997 struct device_node *np = nandc->dev->of_node;
2998 int ret;
2999
3000 if (!nandc->props->is_bam) {
3001 ret = of_property_read_u32(np, "qcom,cmd-crci",
3002 &nandc->cmd_crci);
3003 if (ret) {
3004 dev_err(nandc->dev, "command CRCI unspecified\n");
3005 return ret;
3006 }
3007
3008 ret = of_property_read_u32(np, "qcom,data-crci",
3009 &nandc->data_crci);
3010 if (ret) {
3011 dev_err(nandc->dev, "data CRCI unspecified\n");
3012 return ret;
3013 }
3014 }
3015
3016 return 0;
3017}
3018
3019static int qcom_nandc_probe(struct platform_device *pdev)
3020{
3021 struct qcom_nand_controller *nandc;
3022 const void *dev_data;
3023 struct device *dev = &pdev->dev;
3024 struct resource *res;
3025 int ret;
3026
3027 nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
3028 if (!nandc)
3029 return -ENOMEM;
3030
3031 platform_set_drvdata(pdev, nandc);
3032 nandc->dev = dev;
3033
3034 dev_data = of_device_get_match_data(dev);
3035 if (!dev_data) {
3036 dev_err(&pdev->dev, "failed to get device data\n");
3037 return -ENODEV;
3038 }
3039
3040 nandc->props = dev_data;
3041
3042 nandc->core_clk = devm_clk_get(dev, "core");
3043 if (IS_ERR(nandc->core_clk))
3044 return PTR_ERR(nandc->core_clk);
3045
3046 nandc->aon_clk = devm_clk_get(dev, "aon");
3047 if (IS_ERR(nandc->aon_clk))
3048 return PTR_ERR(nandc->aon_clk);
3049
3050 ret = qcom_nandc_parse_dt(pdev);
3051 if (ret)
3052 return ret;
3053
3054 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3055 nandc->base = devm_ioremap_resource(dev, res);
3056 if (IS_ERR(nandc->base))
3057 return PTR_ERR(nandc->base);
3058
3059 nandc->base_phys = res->start;
3060 nandc->base_dma = dma_map_resource(dev, res->start,
3061 resource_size(res),
3062 DMA_BIDIRECTIONAL, 0);
3063 if (dma_mapping_error(dev, nandc->base_dma))
3064 return -ENXIO;
3065
3066 ret = qcom_nandc_alloc(nandc);
3067 if (ret)
3068 goto err_nandc_alloc;
3069
3070 ret = clk_prepare_enable(nandc->core_clk);
3071 if (ret)
3072 goto err_core_clk;
3073
3074 ret = clk_prepare_enable(nandc->aon_clk);
3075 if (ret)
3076 goto err_aon_clk;
3077
3078 ret = qcom_nandc_setup(nandc);
3079 if (ret)
3080 goto err_setup;
3081
3082 ret = qcom_probe_nand_devices(nandc);
3083 if (ret)
3084 goto err_setup;
3085
3086 return 0;
3087
3088err_setup:
3089 clk_disable_unprepare(nandc->aon_clk);
3090err_aon_clk:
3091 clk_disable_unprepare(nandc->core_clk);
3092err_core_clk:
3093 qcom_nandc_unalloc(nandc);
3094err_nandc_alloc:
3095 dma_unmap_resource(dev, res->start, resource_size(res),
3096 DMA_BIDIRECTIONAL, 0);
3097
3098 return ret;
3099}
3100
/*
 * Platform remove: tear down in reverse probe order — unregister and
 * clean up every registered chip, release DMA resources, gate the clocks
 * and undo the register-space DMA mapping. Memory is devm-managed.
 */
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct qcom_nand_host *host;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry(host, &nandc->host_list, node) {
		chip = &host->chip;
		/* Unregister can fail if the device is still in use; nothing
		 * sensible to do beyond warning at this point. */
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
	}

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	/* base_dma is the address obtained from dma_map_resource() in probe. */
	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return 0;
}
3126
/* IPQ806x: non-BAM (ADM "rxtx" channel + CRCIs), RS 4-bit or BCH 8-bit ECC. */
static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};
3132
/* IPQ4019: QPIC with BAM DMA; NAND_DEV_CMD* registers at the base offset. */
static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x0,
};
3139
/* IPQ8074 (also IPQ6018): QPIC/BAM, NAND_DEV_CMD* block offset by 0x7000. */
static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x7000,
};
3146
/* SDX55: QPIC v2 (skips the NAND_DEV_CMD_VLD/CMD1 save-restore handling). */
static const struct qcom_nandc_props sdx55_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.qpic_v2 = true,
	.dev_cmd_reg_start = 0x7000,
};
3154
3155
3156
3157
3158
/*
 * OF match table: .data points at the per-SoC qcom_nandc_props.
 * IPQ6018 reuses the IPQ8074 configuration.
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq6018-nand",
		.data = &ipq8074_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{
		.compatible = "qcom,sdx55-nand",
		.data = &sdx55_nandc_props,
	},
	{}
};
3182MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
3183
/* Platform driver glue; devices are matched via qcom_nandc_of_match. */
static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
3192module_platform_driver(qcom_nandc_driver);
3193
3194MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
3195MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
3196MODULE_LICENSE("GPL v2");
3197