1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/clk.h>
15#include <linux/slab.h>
16#include <linux/bitops.h>
17#include <linux/dma-mapping.h>
18#include <linux/dmaengine.h>
19#include <linux/module.h>
20#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_mtd.h>
25#include <linux/delay.h>
26
27
28#define NAND_FLASH_CMD 0x00
29#define NAND_ADDR0 0x04
30#define NAND_ADDR1 0x08
31#define NAND_FLASH_CHIP_SELECT 0x0c
32#define NAND_EXEC_CMD 0x10
33#define NAND_FLASH_STATUS 0x14
34#define NAND_BUFFER_STATUS 0x18
35#define NAND_DEV0_CFG0 0x20
36#define NAND_DEV0_CFG1 0x24
37#define NAND_DEV0_ECC_CFG 0x28
38#define NAND_DEV1_ECC_CFG 0x2c
39#define NAND_DEV1_CFG0 0x30
40#define NAND_DEV1_CFG1 0x34
41#define NAND_READ_ID 0x40
42#define NAND_READ_STATUS 0x44
43#define NAND_DEV_CMD0 0xa0
44#define NAND_DEV_CMD1 0xa4
45#define NAND_DEV_CMD2 0xa8
46#define NAND_DEV_CMD_VLD 0xac
47#define SFLASHC_BURST_CFG 0xe0
48#define NAND_ERASED_CW_DETECT_CFG 0xe8
49#define NAND_ERASED_CW_DETECT_STATUS 0xec
50#define NAND_EBI2_ECC_BUF_CFG 0xf0
51#define FLASH_BUF_ACC 0x100
52
53#define NAND_CTRL 0xf00
54#define NAND_VERSION 0xf08
55#define NAND_READ_LOCATION_0 0xf20
56#define NAND_READ_LOCATION_1 0xf24
57
58
59#define NAND_DEV_CMD1_RESTORE 0xdead
60#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
61
62
63#define PAGE_ACC BIT(4)
64#define LAST_PAGE BIT(5)
65
66
67#define NAND_DEV_SEL 0
68#define DM_EN BIT(2)
69
70
71#define FS_OP_ERR BIT(4)
72#define FS_READY_BSY_N BIT(5)
73#define FS_MPU_ERR BIT(8)
74#define FS_DEVICE_STS_ERR BIT(16)
75#define FS_DEVICE_WP BIT(23)
76
77
78#define BS_UNCORRECTABLE_BIT BIT(8)
79#define BS_CORRECTABLE_ERR_MSK 0x1f
80
81
82#define DISABLE_STATUS_AFTER_WRITE 4
83#define CW_PER_PAGE 6
84#define UD_SIZE_BYTES 9
85#define ECC_PARITY_SIZE_BYTES_RS 19
86#define SPARE_SIZE_BYTES 23
87#define NUM_ADDR_CYCLES 27
88#define STATUS_BFR_READ 30
89#define SET_RD_MODE_AFTER_STATUS 31
90
91
92#define DEV0_CFG1_ECC_DISABLE 0
93#define WIDE_FLASH 1
94#define NAND_RECOVERY_CYCLES 2
95#define CS_ACTIVE_BSY 5
96#define BAD_BLOCK_BYTE_NUM 6
97#define BAD_BLOCK_IN_SPARE_AREA 16
98#define WR_RD_BSY_GAP 17
99#define ENABLE_BCH_ECC 27
100
101
102#define ECC_CFG_ECC_DISABLE 0
103#define ECC_SW_RESET 1
104#define ECC_MODE 4
105#define ECC_PARITY_SIZE_BYTES_BCH 8
106#define ECC_NUM_DATA_BYTES 16
107#define ECC_FORCE_CLK_OPEN 30
108
109
110#define READ_ADDR 0
111
112
113#define READ_START_VLD 0
114
115
116#define NUM_STEPS 0
117
118
119#define ERASED_CW_ECC_MASK 1
120#define AUTO_DETECT_RES 0
121#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
122#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
123#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
124#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
125#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
126
127
128#define PAGE_ALL_ERASED BIT(7)
129#define CODEWORD_ALL_ERASED BIT(6)
130#define PAGE_ERASED BIT(5)
131#define CODEWORD_ERASED BIT(4)
132#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
133#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
134
135
136#define NAND_VERSION_MAJOR_MASK 0xf0000000
137#define NAND_VERSION_MAJOR_SHIFT 28
138#define NAND_VERSION_MINOR_MASK 0x0fff0000
139#define NAND_VERSION_MINOR_SHIFT 16
140
141
142#define PAGE_READ 0x2
143#define PAGE_READ_WITH_ECC 0x3
144#define PAGE_READ_WITH_ECC_SPARE 0x4
145#define PROGRAM_PAGE 0x6
146#define PAGE_PROGRAM_WITH_ECC 0x7
147#define PROGRAM_PAGE_SPARE 0x9
148#define BLOCK_ERASE 0xa
149#define FETCH_ID 0xb
150#define RESET_DEVICE 0xd
151
152
153
154
155
156#define NANDC_STEP_SIZE 512
157
158
159
160
161
162#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
163
164
165#define MAX_REG_RD (3 * MAX_NUM_STEPS)
166
167
168#define ECC_NONE BIT(0)
169#define ECC_RS_4BIT BIT(1)
170#define ECC_BCH_4BIT BIT(2)
171#define ECC_BCH_8BIT BIT(3)
172
/*
 * Tracks one queued DMA transfer: its mapping direction, the single-entry
 * scatterlist it maps, and the prepared slave descriptor. Instances live on
 * qcom_nand_controller::desc_list until submitted and then freed.
 */
struct desc_info {
	struct list_head node;			/* entry in nandc->desc_list */

	enum dma_data_direction dir;		/* DMA_TO_DEVICE / DMA_FROM_DEVICE */
	struct scatterlist sgl;			/* single-entry sg for this transfer */
	struct dma_async_tx_descriptor *dma_desc; /* prepared slave descriptor */
};
180
181
182
183
184
/*
 * Shadow copies of controller registers, kept pre-packed in little-endian
 * form so they can be DMAed straight to the hardware by write_reg_dma().
 * Register offsets are mapped to fields by offset_to_nandc_reg().
 */
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	/* values written to NAND_FLASH_STATUS/NAND_READ_STATUS to clear them */
	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	/* saved power-on values, restored after NAND_CMD_PARAM fiddles cmd1/vld */
	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
};
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
/*
 * Per-controller state, shared by all NAND chips (hosts) wired to this
 * controller.
 */
struct qcom_nand_controller {
	struct nand_hw_control controller;	/* base controller (locking) */
	struct list_head host_list;		/* all attached qcom_nand_host */

	struct device *dev;

	void __iomem *base;		/* mapped controller registers */
	dma_addr_t base_dma;		/* bus address of registers, DMA target */

	struct clk *core_clk;
	struct clk *aon_clk;

	struct dma_chan *chan;		/* DMA channel used for all transfers */
	unsigned int cmd_crci;		/* flow-control request line for cmds */
	unsigned int data_crci;		/* flow-control request line for data */
	struct list_head desc_list;	/* queued, not-yet-submitted descs */

	u8 *data_buffer;	/* bounce buffer for small/command transfers */
	int buf_size;
	int buf_count;		/* valid bytes currently in data_buffer */
	int buf_start;		/* read cursor within data_buffer */

	__le32 *reg_read_buf;	/* DMA destination for register read-backs */
	int reg_read_pos;	/* next free slot index in reg_read_buf */

	struct nandc_regs *regs;	/* shadow registers for DMA writes */

	u32 cmd1, vld;		/* power-on CMD1/CMD_VLD values to restore */
	u32 ecc_modes;		/* supported ECC modes (ECC_* bitmask) */
};
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
/*
 * Per-chip (per chip-select) state.
 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;		/* entry in controller's host_list */

	int cs;				/* chip select index */
	int cw_size;			/* full codeword size incl. spare/ECC */
	int cw_data;			/* data bytes carried per codeword */
	bool use_ecc;			/* HW ECC enabled for current op */
	bool bch_enabled;		/* BCH ECC engine in use (vs RS) */
	int ecc_bytes_hw;		/* ECC parity bytes per codeword */
	int spare_bytes;
	int bbm_size;			/* bad-block-marker bytes per codeword */
	u8 status;			/* emulated NAND status byte */
	int last_command;		/* last command passed to cmdfunc */

	u32 cfg0, cfg1;			/* CFG0/CFG1 for ECC-enabled access */
	u32 cfg0_raw, cfg1_raw;		/* CFG0/CFG1 for raw access */
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;		/* value that clears NAND_FLASH_STATUS */
	u32 clrreadstatus;		/* value that clears NAND_READ_STATUS */
};
319
/* Map a generic nand_chip back to its embedding qcom_nand_host. */
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}
324
/* Map a chip's shared nand_hw_control back to the owning controller. */
static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}
331
/* Read a controller register directly (CPU access, not DMA). */
static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}
336
/* Write a controller register directly (CPU access, not DMA). */
static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}
342
343static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
344{
345 switch (offset) {
346 case NAND_FLASH_CMD:
347 return ®s->cmd;
348 case NAND_ADDR0:
349 return ®s->addr0;
350 case NAND_ADDR1:
351 return ®s->addr1;
352 case NAND_FLASH_CHIP_SELECT:
353 return ®s->chip_sel;
354 case NAND_EXEC_CMD:
355 return ®s->exec;
356 case NAND_FLASH_STATUS:
357 return ®s->clrflashstatus;
358 case NAND_DEV0_CFG0:
359 return ®s->cfg0;
360 case NAND_DEV0_CFG1:
361 return ®s->cfg1;
362 case NAND_DEV0_ECC_CFG:
363 return ®s->ecc_bch_cfg;
364 case NAND_READ_STATUS:
365 return ®s->clrreadstatus;
366 case NAND_DEV_CMD1:
367 return ®s->cmd1;
368 case NAND_DEV_CMD1_RESTORE:
369 return ®s->orig_cmd1;
370 case NAND_DEV_CMD_VLD:
371 return ®s->vld;
372 case NAND_DEV_CMD_VLD_RESTORE:
373 return ®s->orig_vld;
374 case NAND_EBI2_ECC_BUF_CFG:
375 return ®s->ecc_buf_cfg;
376 default:
377 return NULL;
378 }
379}
380
381static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
382 u32 val)
383{
384 struct nandc_regs *regs = nandc->regs;
385 __le32 *reg;
386
387 reg = offset_to_nandc_reg(regs, offset);
388
389 if (reg)
390 *reg = cpu_to_le32(val);
391}
392
393
394static void set_address(struct qcom_nand_host *host, u16 column, int page)
395{
396 struct nand_chip *chip = &host->chip;
397 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
398
399 if (chip->options & NAND_BUSWIDTH_16)
400 column >>= 1;
401
402 nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
403 nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
404}
405
406
407
408
409
410
411
412
413static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
414{
415 struct nand_chip *chip = &host->chip;
416 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
417 u32 cmd, cfg0, cfg1, ecc_bch_cfg;
418
419 if (read) {
420 if (host->use_ecc)
421 cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
422 else
423 cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
424 } else {
425 cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
426 }
427
428 if (host->use_ecc) {
429 cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
430 (num_cw - 1) << CW_PER_PAGE;
431
432 cfg1 = host->cfg1;
433 ecc_bch_cfg = host->ecc_bch_cfg;
434 } else {
435 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
436 (num_cw - 1) << CW_PER_PAGE;
437
438 cfg1 = host->cfg1_raw;
439 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
440 }
441
442 nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
443 nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
444 nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
445 nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
446 nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
447 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
448 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
449 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
450}
451
452static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
453 int reg_off, const void *vaddr, int size,
454 bool flow_control)
455{
456 struct desc_info *desc;
457 struct dma_async_tx_descriptor *dma_desc;
458 struct scatterlist *sgl;
459 struct dma_slave_config slave_conf;
460 enum dma_transfer_direction dir_eng;
461 int ret;
462
463 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
464 if (!desc)
465 return -ENOMEM;
466
467 sgl = &desc->sgl;
468
469 sg_init_one(sgl, vaddr, size);
470
471 if (read) {
472 dir_eng = DMA_DEV_TO_MEM;
473 desc->dir = DMA_FROM_DEVICE;
474 } else {
475 dir_eng = DMA_MEM_TO_DEV;
476 desc->dir = DMA_TO_DEVICE;
477 }
478
479 ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
480 if (ret == 0) {
481 ret = -ENOMEM;
482 goto err;
483 }
484
485 memset(&slave_conf, 0x00, sizeof(slave_conf));
486
487 slave_conf.device_fc = flow_control;
488 if (read) {
489 slave_conf.src_maxburst = 16;
490 slave_conf.src_addr = nandc->base_dma + reg_off;
491 slave_conf.slave_id = nandc->data_crci;
492 } else {
493 slave_conf.dst_maxburst = 16;
494 slave_conf.dst_addr = nandc->base_dma + reg_off;
495 slave_conf.slave_id = nandc->cmd_crci;
496 }
497
498 ret = dmaengine_slave_config(nandc->chan, &slave_conf);
499 if (ret) {
500 dev_err(nandc->dev, "failed to configure dma channel\n");
501 goto err;
502 }
503
504 dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
505 if (!dma_desc) {
506 dev_err(nandc->dev, "failed to prepare desc\n");
507 ret = -EINVAL;
508 goto err;
509 }
510
511 desc->dma_desc = dma_desc;
512
513 list_add_tail(&desc->node, &nandc->desc_list);
514
515 return 0;
516err:
517 kfree(desc);
518
519 return ret;
520}
521
522
523
524
525
526
527
528
529static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
530 int num_regs)
531{
532 bool flow_control = false;
533 void *vaddr;
534 int size;
535
536 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
537 flow_control = true;
538
539 size = num_regs * sizeof(u32);
540 vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
541 nandc->reg_read_pos += num_regs;
542
543 return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
544}
545
546
547
548
549
550
551
552
553static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
554 int num_regs)
555{
556 bool flow_control = false;
557 struct nandc_regs *regs = nandc->regs;
558 void *vaddr;
559 int size;
560
561 vaddr = offset_to_nandc_reg(regs, first);
562
563 if (first == NAND_FLASH_CMD)
564 flow_control = true;
565
566 if (first == NAND_DEV_CMD1_RESTORE)
567 first = NAND_DEV_CMD1;
568
569 if (first == NAND_DEV_CMD_VLD_RESTORE)
570 first = NAND_DEV_CMD_VLD;
571
572 size = num_regs * sizeof(u32);
573
574 return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
575}
576
577
578
579
580
581
582
583
584
/*
 * Queue a DMA read of @size bytes from the controller buffer at @reg_off
 * (typically FLASH_BUF_ACC) into @vaddr. No flow control needed.
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size)
{
	return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
590
591
592
593
594
595
596
597
598
/*
 * Queue a DMA write of @size bytes from @vaddr to the controller buffer at
 * @reg_off (typically FLASH_BUF_ACC). No flow control needed.
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size)
{
	return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
604
605
606
607
608
/*
 * Queue the register transfers for reading one codeword: push the prepared
 * CMD/ADDR/CFG/ECC shadows, kick EXEC_CMD, then read back the flash/buffer
 * status and the erased-codeword detect status. Transfers are only queued;
 * the caller submits them.
 */
static void config_cw_read(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);

	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
}
620
621
622
623
624
/*
 * Queue the register writes that must precede the data transfer when
 * programming one codeword (CMD/ADDR, CFG0-CFG1/ECC, buffer config).
 */
static void config_cw_write_pre(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
}
631
/*
 * Queue the transfers that follow the data phase of a codeword program:
 * kick EXEC_CMD, read back the flash status, then clear the flash and
 * read status registers for the next codeword.
 */
static void config_cw_write_post(struct qcom_nand_controller *nandc)
{
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
	write_reg_dma(nandc, NAND_READ_STATUS, 1);
}
641
642
643
644
645
646
647
/*
 * Queue the transfers implementing NAND_CMD_PARAM: temporarily reprogram
 * the controller for a raw 512-byte read with ECC disabled, point CMD1 at
 * the PARAM opcode, read the data into nandc->data_buffer, then restore
 * the original CMD1/CMD_VLD values. Returns 0 (descriptors are only
 * queued; the caller submits them).
 */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * This runs during identify, before the chip geometry is known:
	 * configure a single raw 512-byte codeword read at address 0 with
	 * ECC disabled.
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* do not issue the read-start byte; send the PARAM opcode instead */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~(1 << READ_START_VLD))
		      | 0 << READ_START_VLD);
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* stash the power-on values so they can be DMAed back afterwards */
	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count);

	/* restore CMD1/CMD_VLD after the transfer */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);

	return 0;
}
704
705
/*
 * Queue the transfers implementing NAND_CMD_ERASE for the block containing
 * @page_addr: program the shadow registers for a BLOCK_ERASE, kick EXEC,
 * read back the flash status, then clear the status registers. Returns 0;
 * descriptors are only queued here.
 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* erase uses the raw config with the codeword count forced to one */
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
	write_reg_dma(nandc, NAND_READ_STATUS, 1);

	return 0;
}
733
734
/*
 * Queue the transfers implementing NAND_CMD_READID: issue FETCH_ID with
 * @column as the address and read back NAND_READ_ID into reg_read_buf.
 * A column of -1 (no address) is a no-op. Returns 0; descriptors are only
 * queued here.
 */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_READ_ID, 1);

	return 0;
}
756
757
/*
 * Queue the transfers implementing NAND_CMD_RESET: issue RESET_DEVICE and
 * read back the flash status. Returns 0; descriptors are only queued here.
 */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);

	return 0;
}
773
774
775static int submit_descs(struct qcom_nand_controller *nandc)
776{
777 struct desc_info *desc;
778 dma_cookie_t cookie = 0;
779
780 list_for_each_entry(desc, &nandc->desc_list, node)
781 cookie = dmaengine_submit(desc->dma_desc);
782
783 if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
784 return -ETIMEDOUT;
785
786 return 0;
787}
788
789static void free_descs(struct qcom_nand_controller *nandc)
790{
791 struct desc_info *desc, *n;
792
793 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
794 list_del(&desc->node);
795 dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
796 kfree(desc);
797 }
798}
799
800
/* Reset the register read-back buffer and its write cursor. */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	memset(nandc->reg_read_buf, 0,
	       MAX_REG_RD * sizeof(*nandc->reg_read_buf));
}
807
/*
 * Common setup done before dispatching any cmdfunc command: reset the
 * bounce-buffer cursors, default to non-ECC access, remember the command
 * and clear the register read-back buffer.
 */
static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);
}
820
821
822
823
824
825
826static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
827{
828 struct nand_chip *chip = &host->chip;
829 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
830 struct nand_ecc_ctrl *ecc = &chip->ecc;
831 int num_cw;
832 int i;
833
834 num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
835
836 for (i = 0; i < num_cw; i++) {
837 u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
838
839 if (flash_status & FS_MPU_ERR)
840 host->status &= ~NAND_STATUS_WP;
841
842 if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
843 (flash_status &
844 FS_DEVICE_STS_ERR)))
845 host->status |= NAND_STATUS_FAIL;
846 }
847}
848
849static void post_command(struct qcom_nand_host *host, int command)
850{
851 struct nand_chip *chip = &host->chip;
852 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
853
854 switch (command) {
855 case NAND_CMD_READID:
856 memcpy(nandc->data_buffer, nandc->reg_read_buf,
857 nandc->buf_count);
858 break;
859 case NAND_CMD_PAGEPROG:
860 case NAND_CMD_ERASE1:
861 parse_erase_write_errors(host, command);
862 break;
863 default:
864 break;
865 }
866}
867
868
869
870
871
872
873
/*
 * cmdfunc implementation. Commands that map onto a complete controller
 * operation (RESET, READID, PARAM, ERASE1) queue their descriptors here
 * and are submitted immediately (wait = true). READ0 and SEQIN only
 * program the shadow registers; the actual transfer happens later in the
 * read/write page paths. PAGEPROG/STATUS/NONE need no work here.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* only full-page reads are supported */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
948
949
950
951
952
953
954
955
956
957
958
959
960static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
961{
962 u8 empty1, empty2;
963
964
965
966
967
968
969
970 empty1 = data_buf[3];
971 empty2 = data_buf[175];
972
973
974
975
976
977 if ((empty1 == 0x54 && empty2 == 0xff) ||
978 (empty1 == 0xff && empty2 == 0x54)) {
979 data_buf[3] = 0xff;
980 data_buf[175] = 0xff;
981 }
982
983
984
985
986
987 if (memchr_inv(data_buf, 0xff, data_len)) {
988 data_buf[3] = empty1;
989 data_buf[175] = empty2;
990
991 return false;
992 }
993
994 return true;
995}
996
/*
 * Layout of reg_read_buf entries per codeword after a page read: the three
 * registers read back by config_cw_read(), in order.
 */
struct read_stats {
	__le32 flash;		/* NAND_FLASH_STATUS */
	__le32 buffer;		/* NAND_BUFFER_STATUS */
	__le32 erased_cw;	/* NAND_ERASED_CW_DETECT_STATUS */
};
1002
1003
1004
1005
1006
/*
 * Walk the per-codeword status triples recorded during a page read,
 * distinguishing genuinely erased codewords from real ECC failures and
 * accumulating correctable bitflip counts. @data_buf/@oob_buf track the
 * same layout the read used (last codeword carries the extra OOB bytes).
 * Returns the maximum number of bitflips seen in any codeword.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	buf = (struct read_stats *)nandc->reg_read_buf;

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* the last codeword holds less data plus all the extra OOB */
		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/*
			 * BCH has HW erased-page detection; for RS we must
			 * inspect (and possibly fix up) the data ourselves.
			 */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			/* erased codeword: nothing to count, just advance */
			if (erased) {
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * Let the core double-check whether this is a
				 * mostly-erased codeword with a few bitflips
				 * before declaring a hard ECC failure.
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			/* codeword read fine; record corrected bitflips */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
1093
1094
1095
1096
1097
/*
 * Queue, submit and wait for an ECC-protected page read into @data_buf
 * and/or @oob_buf (either may be NULL to skip that part). The last
 * codeword carries the extra per-page OOB bytes. Returns 0 or the
 * submit_descs() error.
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	/* queue the codeword reads one at a time */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size);

		/*
		 * The controller consumes the bad block marker bytes, so
		 * substitute 0xff for them in the caller's OOB buffer and
		 * read the remaining OOB from after the data area.
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
1156
1157
1158
1159
1160
/*
 * Read only the last codeword of @page into nandc->data_buffer (ECC'ed or
 * raw size depending on host->use_ecc). Used by the OOB-write and
 * bad-block paths, which only care about the final codeword.
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean buffer in case the read comes up short */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_cw_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
1191
1192
1193static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1194 uint8_t *buf, int oob_required, int page)
1195{
1196 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1197 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1198 u8 *data_buf, *oob_buf = NULL;
1199 int ret;
1200
1201 data_buf = buf;
1202 oob_buf = oob_required ? chip->oob_poi : NULL;
1203
1204 ret = read_page_ecc(host, data_buf, oob_buf);
1205 if (ret) {
1206 dev_err(nandc->dev, "failure to read page\n");
1207 return ret;
1208 }
1209
1210 return parse_read_errors(host, data_buf, oob_buf);
1211}
1212
1213
1214static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1215 struct nand_chip *chip, uint8_t *buf,
1216 int oob_required, int page)
1217{
1218 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1219 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1220 u8 *data_buf, *oob_buf;
1221 struct nand_ecc_ctrl *ecc = &chip->ecc;
1222 int i, ret;
1223
1224 data_buf = buf;
1225 oob_buf = chip->oob_poi;
1226
1227 host->use_ecc = false;
1228 update_rw_regs(host, ecc->steps, true);
1229
1230 for (i = 0; i < ecc->steps; i++) {
1231 int data_size1, data_size2, oob_size1, oob_size2;
1232 int reg_off = FLASH_BUF_ACC;
1233
1234 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1235 oob_size1 = host->bbm_size;
1236
1237 if (i == (ecc->steps - 1)) {
1238 data_size2 = ecc->size - data_size1 -
1239 ((ecc->steps - 1) << 2);
1240 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1241 host->spare_bytes;
1242 } else {
1243 data_size2 = host->cw_data - data_size1;
1244 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1245 }
1246
1247 config_cw_read(nandc);
1248
1249 read_data_dma(nandc, reg_off, data_buf, data_size1);
1250 reg_off += data_size1;
1251 data_buf += data_size1;
1252
1253 read_data_dma(nandc, reg_off, oob_buf, oob_size1);
1254 reg_off += oob_size1;
1255 oob_buf += oob_size1;
1256
1257 read_data_dma(nandc, reg_off, data_buf, data_size2);
1258 reg_off += data_size2;
1259 data_buf += data_size2;
1260
1261 read_data_dma(nandc, reg_off, oob_buf, oob_size2);
1262 oob_buf += oob_size2;
1263 }
1264
1265 ret = submit_descs(nandc);
1266 if (ret)
1267 dev_err(nandc->dev, "failure to read raw page\n");
1268
1269 free_descs(nandc);
1270
1271 return 0;
1272}
1273
1274
1275static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1276 int page)
1277{
1278 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1279 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1280 struct nand_ecc_ctrl *ecc = &chip->ecc;
1281 int ret;
1282
1283 clear_read_regs(nandc);
1284
1285 host->use_ecc = true;
1286 set_address(host, 0, page);
1287 update_rw_regs(host, ecc->steps, true);
1288
1289 ret = read_page_ecc(host, NULL, chip->oob_poi);
1290 if (ret)
1291 dev_err(nandc->dev, "failure to read oob\n");
1292
1293 return ret;
1294}
1295
1296
/*
 * ecc->write_page implementation: program the page codeword by codeword
 * with HW ECC enabled. Only the last codeword receives user OOB bytes;
 * the controller generates ECC/spare for the rest.
 */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* last codeword: short data plus the per-page OOB tail */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);

		/*
		 * Only the last codeword's OOB is user-controlled; skip the
		 * bad block marker bytes (owned by the controller) and write
		 * the remainder after the data area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size);
		}

		config_cw_write_post(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	return ret;
}
1358
1359
/*
 * ecc->write_page_raw implementation: program the page with ECC disabled,
 * writing each codeword as the data1/bbm/data2/spare segments of the raw
 * on-flash layout (mirror of qcom_nandc_read_page_raw).
 */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		/* data before the BBM, then the BBM bytes themselves */
		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		/* the last codeword carries the remaining per-page OOB */
		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_cw_write_pre(nandc);

		write_data_dma(nandc, reg_off, data_buf, data_size1);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2);
		oob_buf += oob_size2;

		config_cw_write_post(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	return ret;
}
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
/*
 * Implements ecc->write_oob().
 *
 * The controller cannot program only the OOB area; a whole codeword must
 * be written. The free OOB bytes live in the last codeword of a page, so
 * we read that codeword back (copy_last_cw() — presumably filling
 * nandc->data_buffer, as in qcom_nandc_block_bad()), splice the caller's
 * OOB content into it, and rewrite just that codeword with ECC enabled.
 *
 * Returns 0 on success, a negative error on read-back failure, or -EIO
 * if the write or the subsequent program operation fails.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int free_boff;
	int data_size, oob_size;
	int ret, status = 0;

	host->use_ecc = true;

	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = ecc->steps << 2;

	free_boff = ecc->layout->oobfree[0].offset;

	/* override new oob content to last codeword */
	memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size);

	/* address the last codeword of the page */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		       data_size + oob_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	/* issue the actual program and wait for chip-reported status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
1484
/*
 * Implements chip->block_bad(): check the factory bad block marker.
 *
 * The BBM lives in the last codeword of the page and is only meaningful
 * when read with ECC disabled (an ECC read would scramble its position),
 * so the last codeword is fetched raw and the marker byte(s) compared
 * against 0xff.
 *
 * Returns non-zero if the block is bad, 0 otherwise.
 */
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;
	u32 flash_status;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* read the last codeword of the block's first page, ECC off */
	host->use_ecc = false;

	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	/* check the controller's operational status for this read */
	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);

	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	/* offset of the BBM within the last codeword's raw data */
	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	/* 16-bit wide devices carry a two-byte marker */
	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	/* NOTE(review): a failed read reports the block as good (bad == 0) */
	return bad;
}
1524
/*
 * Implements chip->block_markbad(): mark a block bad by zero-filling the
 * last codeword of its first page (where the BBM lives), written raw so
 * the ECC engine does not relocate the marker.
 *
 * Returns 0 on success, -EIO on write or program failure.
 */
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, status = 0;

	clear_read_regs(nandc);

	/*
	 * to mark the block as bad, flash the entire last codeword with 0s.
	 * the rest of the codeword's content doesn't matter since the block
	 * won't be used again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare a raw write of the last codeword */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_cw_write_pre(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
	config_cw_write_post(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	/* issue the program and wait for chip-reported status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
1568
1569
1570
1571
1572
1573
1574
1575static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
1576{
1577 struct nand_chip *chip = mtd_to_nand(mtd);
1578 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1579 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1580 u8 *buf = nandc->data_buffer;
1581 u8 ret = 0x0;
1582
1583 if (host->last_command == NAND_CMD_STATUS) {
1584 ret = host->status;
1585
1586 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
1587
1588 return ret;
1589 }
1590
1591 if (nandc->buf_start < nandc->buf_count)
1592 ret = buf[nandc->buf_start++];
1593
1594 return ret;
1595}
1596
1597static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1598{
1599 struct nand_chip *chip = mtd_to_nand(mtd);
1600 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1601 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
1602
1603 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
1604 nandc->buf_start += real_len;
1605}
1606
1607static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
1608 int len)
1609{
1610 struct nand_chip *chip = mtd_to_nand(mtd);
1611 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1612 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
1613
1614 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
1615
1616 nandc->buf_start += real_len;
1617}
1618
1619
1620static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
1621{
1622 struct nand_chip *chip = mtd_to_nand(mtd);
1623 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1624
1625 if (chipnr <= 0)
1626 return;
1627
1628 dev_warn(nandc->dev, "invalid chip select\n");
1629}
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
/*
 * Build the nand_ecclayout describing this host's OOB usage.
 *
 * Layout (as constructed below): the OOB bytes of the first steps - 1
 * codewords are all ECC bytes and sit contiguously; the last codeword
 * contributes its bad block marker byte(s), then the single free OOB
 * region (4 bytes per step), then its hardware ECC and spare bytes.
 *
 * Returns the devm-allocated layout, or NULL on allocation failure.
 */
static struct nand_ecclayout *
qcom_nand_create_layout(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct nand_ecclayout *layout;
	int i, j, steps, pos = 0, shift = 0;

	layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL);
	if (!layout)
		return NULL;

	steps = mtd->writesize / ecc->size;
	layout->eccbytes = steps * ecc->bytes;

	/* free OOB follows the first steps - 1 codewords' ECC and the BBM */
	layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size;
	layout->oobfree[0].length = steps << 2;

	/* ECC positions for the first steps - 1 codewords: contiguous */
	for (i = 0; i < steps - 1; i++) {
		for (j = 0; j < ecc->bytes; j++)
			layout->eccpos[pos++] = i * ecc->bytes + j;
	}

	/*
	 * last codeword (note: i == steps - 1 here, carried over from the
	 * loop above): its bad block marker byte(s) come first
	 */
	for (j = 0; j < host->bbm_size; j++)
		layout->eccpos[pos++] = i * ecc->bytes + j;

	/*
	 * ...then, skipping past the free OOB region, the last codeword's
	 * hardware ECC and spare bytes
	 */
	shift = layout->oobfree[0].length + host->bbm_size;

	for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++)
		layout->eccpos[pos++] = i * ecc->bytes + shift + j;

	return layout;
}
1767
1768static int qcom_nand_host_setup(struct qcom_nand_host *host)
1769{
1770 struct nand_chip *chip = &host->chip;
1771 struct mtd_info *mtd = nand_to_mtd(chip);
1772 struct nand_ecc_ctrl *ecc = &chip->ecc;
1773 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1774 int cwperpage, bad_block_byte;
1775 bool wide_bus;
1776 int ecc_mode = 1;
1777
1778
1779
1780
1781
1782 if (ecc->size != NANDC_STEP_SIZE) {
1783 dev_err(nandc->dev, "invalid ecc size\n");
1784 return -EINVAL;
1785 }
1786
1787 wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
1788
1789 if (ecc->strength >= 8) {
1790
1791 host->bch_enabled = true;
1792 ecc_mode = 1;
1793
1794 if (wide_bus) {
1795 host->ecc_bytes_hw = 14;
1796 host->spare_bytes = 0;
1797 host->bbm_size = 2;
1798 } else {
1799 host->ecc_bytes_hw = 13;
1800 host->spare_bytes = 2;
1801 host->bbm_size = 1;
1802 }
1803 } else {
1804
1805
1806
1807
1808
1809 if (nandc->ecc_modes & ECC_BCH_4BIT) {
1810
1811 host->bch_enabled = true;
1812 ecc_mode = 0;
1813
1814 if (wide_bus) {
1815 host->ecc_bytes_hw = 8;
1816 host->spare_bytes = 2;
1817 host->bbm_size = 2;
1818 } else {
1819 host->ecc_bytes_hw = 7;
1820 host->spare_bytes = 4;
1821 host->bbm_size = 1;
1822 }
1823 } else {
1824
1825 host->ecc_bytes_hw = 10;
1826
1827 if (wide_bus) {
1828 host->spare_bytes = 0;
1829 host->bbm_size = 2;
1830 } else {
1831 host->spare_bytes = 1;
1832 host->bbm_size = 1;
1833 }
1834 }
1835 }
1836
1837
1838
1839
1840
1841
1842
1843 ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
1844
1845 ecc->read_page = qcom_nandc_read_page;
1846 ecc->read_page_raw = qcom_nandc_read_page_raw;
1847 ecc->read_oob = qcom_nandc_read_oob;
1848 ecc->write_page = qcom_nandc_write_page;
1849 ecc->write_page_raw = qcom_nandc_write_page_raw;
1850 ecc->write_oob = qcom_nandc_write_oob;
1851
1852 ecc->mode = NAND_ECC_HW;
1853
1854 ecc->layout = qcom_nand_create_layout(host);
1855 if (!ecc->layout)
1856 return -ENOMEM;
1857
1858 cwperpage = mtd->writesize / ecc->size;
1859
1860
1861
1862
1863
1864
1865 host->cw_data = 516;
1866
1867
1868
1869
1870
1871 host->cw_size = host->cw_data + ecc->bytes;
1872
1873 if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
1874 dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
1875 return -EINVAL;
1876 }
1877
1878 bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
1879
1880 host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
1881 | host->cw_data << UD_SIZE_BYTES
1882 | 0 << DISABLE_STATUS_AFTER_WRITE
1883 | 5 << NUM_ADDR_CYCLES
1884 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
1885 | 0 << STATUS_BFR_READ
1886 | 1 << SET_RD_MODE_AFTER_STATUS
1887 | host->spare_bytes << SPARE_SIZE_BYTES;
1888
1889 host->cfg1 = 7 << NAND_RECOVERY_CYCLES
1890 | 0 << CS_ACTIVE_BSY
1891 | bad_block_byte << BAD_BLOCK_BYTE_NUM
1892 | 0 << BAD_BLOCK_IN_SPARE_AREA
1893 | 2 << WR_RD_BSY_GAP
1894 | wide_bus << WIDE_FLASH
1895 | host->bch_enabled << ENABLE_BCH_ECC;
1896
1897 host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
1898 | host->cw_size << UD_SIZE_BYTES
1899 | 5 << NUM_ADDR_CYCLES
1900 | 0 << SPARE_SIZE_BYTES;
1901
1902 host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
1903 | 0 << CS_ACTIVE_BSY
1904 | 17 << BAD_BLOCK_BYTE_NUM
1905 | 1 << BAD_BLOCK_IN_SPARE_AREA
1906 | 2 << WR_RD_BSY_GAP
1907 | wide_bus << WIDE_FLASH
1908 | 1 << DEV0_CFG1_ECC_DISABLE;
1909
1910 host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
1911 | 0 << ECC_SW_RESET
1912 | host->cw_data << ECC_NUM_DATA_BYTES
1913 | 1 << ECC_FORCE_CLK_OPEN
1914 | ecc_mode << ECC_MODE
1915 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
1916
1917 host->ecc_buf_cfg = 0x203 << NUM_STEPS;
1918
1919 host->clrflashstatus = FS_READY_BSY_N;
1920 host->clrreadstatus = 0xc0;
1921
1922 dev_dbg(nandc->dev,
1923 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
1924 host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
1925 host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
1926 cwperpage);
1927
1928 return 0;
1929}
1930
1931static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
1932{
1933 int ret;
1934
1935 ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
1936 if (ret) {
1937 dev_err(nandc->dev, "failed to set DMA mask\n");
1938 return ret;
1939 }
1940
1941
1942
1943
1944
1945
1946
1947 nandc->buf_size = 532;
1948
1949 nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
1950 GFP_KERNEL);
1951 if (!nandc->data_buffer)
1952 return -ENOMEM;
1953
1954 nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
1955 GFP_KERNEL);
1956 if (!nandc->regs)
1957 return -ENOMEM;
1958
1959 nandc->reg_read_buf = devm_kzalloc(nandc->dev,
1960 MAX_REG_RD * sizeof(*nandc->reg_read_buf),
1961 GFP_KERNEL);
1962 if (!nandc->reg_read_buf)
1963 return -ENOMEM;
1964
1965 nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
1966 if (!nandc->chan) {
1967 dev_err(nandc->dev, "failed to request slave channel\n");
1968 return -ENODEV;
1969 }
1970
1971 INIT_LIST_HEAD(&nandc->desc_list);
1972 INIT_LIST_HEAD(&nandc->host_list);
1973
1974 spin_lock_init(&nandc->controller.lock);
1975 init_waitqueue_head(&nandc->controller.wq);
1976
1977 return 0;
1978}
1979
/* release the one resource not managed by devm: the DMA channel */
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	dma_release_channel(nandc->chan);
}
1984
1985
/* one-time hardware initialization of the NAND controller */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	/* clear the serial-flash (SFLASHC) burst configuration */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);

	/* enable DMA mode in the chip select register */
	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);

	/* save reset values of NAND_DEV_CMD1/NAND_DEV_CMD_VLD for later restore */
	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);

	return 0;
}
2000
/*
 * Initialize one NAND host (one chip select node): read its CS number
 * from DT, wire up the nand_chip callbacks, detect the chip
 * (nand_scan_ident), apply controller-specific ECC setup, finish the
 * scan and register the resulting MTD device.
 *
 * Returns 0 on success or a negative errno.
 */
static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
			       struct qcom_nand_host *host,
			       struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->cmdfunc = qcom_nandc_command;
	chip->select_chip = qcom_nandc_select_chip;
	chip->read_byte = qcom_nandc_read_byte;
	chip->read_buf = qcom_nandc_read_buf;
	chip->write_buf = qcom_nandc_write_buf;

	/*
	 * the bad block marker is readable only from the last codeword of a
	 * page with ECC disabled; the generic nand_base/nand_bbt helpers
	 * don't read with ECC off, so override block_bad/block_markbad with
	 * our raw-access implementations
	 */
	chip->block_bad = qcom_nandc_block_bad;
	chip->block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
			 NAND_SKIP_BBTSCAN;

	/* initial emulated status: ready and write-protected */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	ret = qcom_nand_host_setup(host);
	if (ret)
		return ret;

	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	return mtd_device_register(mtd, NULL, 0);
}
2059
2060
2061static int qcom_nandc_parse_dt(struct platform_device *pdev)
2062{
2063 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2064 struct device_node *np = nandc->dev->of_node;
2065 int ret;
2066
2067 ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
2068 if (ret) {
2069 dev_err(nandc->dev, "command CRCI unspecified\n");
2070 return ret;
2071 }
2072
2073 ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
2074 if (ret) {
2075 dev_err(nandc->dev, "data CRCI unspecified\n");
2076 return ret;
2077 }
2078
2079 return 0;
2080}
2081
/*
 * Probe: map the register block, fetch clocks and DT properties,
 * allocate controller resources, enable clocks, initialize the hardware,
 * then scan DT children for "qcom,nandcs" nodes and bring up one NAND
 * host per chip select. Fails if no host could be registered; the error
 * ladder unwinds in reverse acquisition order.
 */
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	struct qcom_nand_host *host;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	/* match data encodes the ECC modes this controller supports */
	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->ecc_modes = (unsigned long)dev_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	/* bus address of the register block, used as a DMA target/source */
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	/* one qcom_nand_host per "qcom,nandcs" child node */
	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "qcom,nandcs")) {
			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				of_node_put(child);
				ret = -ENOMEM;
				goto err_cs_init;
			}

			/* a failed host init is not fatal; try the next CS */
			ret = qcom_nand_host_init(nandc, host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue;
			}

			list_add_tail(&host->node, &nandc->host_list);
		}
	}

	/* fail if no chip select was brought up successfully */
	if (list_empty(&nandc->host_list)) {
		ret = -ENODEV;
		goto err_cs_init;
	}

	return 0;

err_cs_init:
	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));
err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
2180
/*
 * Remove: release each registered NAND host, then the DMA channel, then
 * disable clocks — the reverse of the probe acquisition order.
 */
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}
2196
/* ECC modes supported by the EBI2-attached NAND controller */
#define EBI2_NANDC_ECC_MODES (ECC_RS_4BIT | ECC_BCH_8BIT)

/*
 * .data carries the supported ECC mode flags for the matched controller
 * (retrieved in probe via of_device_get_match_data())
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{ .compatible = "qcom,ipq806x-nand",
	  .data = (void *)EBI2_NANDC_ECC_MODES,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove  = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");
2224