#include <linux/clk.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/log2.h>

#include "brcmnand.h"

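/*
 * This flag controls whether the flash #WP pin stays asserted between
 * erase/write commands, to mitigate corruption from power glitches:
 * with wp_on == 1 (the default), WP is kept set and only cleared around
 * erase/write operations (see brcmnand_wp()); any other value leaves WP
 * management to the controller setup code.
 */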
static int wp_on = 1;
module_param(wp_on, int, 0444);

#define DRV_NAME "brcmnand"

/* Native command opcodes, written to the CMD_START register */
#define CMD_NULL 0x00
#define CMD_PAGE_READ 0x01
#define CMD_SPARE_AREA_READ 0x02
#define CMD_STATUS_READ 0x03
#define CMD_PROGRAM_PAGE 0x04
#define CMD_PROGRAM_SPARE_AREA 0x05
#define CMD_COPY_BACK 0x06
#define CMD_DEVICE_ID_READ 0x07
#define CMD_BLOCK_ERASE 0x08
#define CMD_FLASH_RESET 0x09
#define CMD_BLOCKS_LOCK 0x0a
#define CMD_BLOCKS_LOCK_DOWN 0x0b
#define CMD_BLOCKS_UNLOCK 0x0c
#define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
#define CMD_PARAMETER_READ 0x0e
#define CMD_PARAMETER_CHANGE_COL 0x0f
#define CMD_LOW_LEVEL_OP 0x10

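/*
 * Flash DMA descriptor, as consumed by the FLASH_DMA engine. Descriptors are
 * chained via next_desc/next_desc_ext and must live in DMA-coherent memory
 * (see brcmnand_fill_dma_desc() and brcmnand_dma_run() below).
 */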
struct brcm_nand_dma_desc {
	u32 next_desc;
	u32 next_desc_ext;
	u32 cmd_irq;
	u32 dram_addr;
	u32 dram_addr_ext;
	u32 tfr_len;
	u32 total_len;
	u32 flash_addr;
	u32 flash_addr_ext;
	u32 cs;
	u32 pad2[5];
	u32 status_valid;
} __packed;

/* flash_dma descriptor status bits */
#define FLASH_DMA_ECC_ERROR (1 << 8)
#define FLASH_DMA_CORR_ERROR (1 << 9)

/* Bitfields for FLASH_DMA_MODE */
#define FLASH_DMA_MODE_STOP_ON_ERROR	BIT(1)
#define FLASH_DMA_MODE_MODE		BIT(0)
#define FLASH_DMA_MODE_MASK		(FLASH_DMA_MODE_STOP_ON_ERROR |	\
					 FLASH_DMA_MODE_MODE)

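/* 512B flash cache in the NAND controller HW */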
#define FC_SHIFT	9U
#define FC_BYTES	512U
#define FC_WORDS	(FC_BYTES >> 2)

#define BRCMNAND_MIN_PAGESIZE	512
#define BRCMNAND_MIN_BLOCKSIZE	(8 * 1024)
#define BRCMNAND_MIN_DEVSIZE	(4ULL * 1024 * 1024)

#define NAND_CTRL_RDY			(INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS	100

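/* flash_dma registers */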
enum flash_dma_reg {
	FLASH_DMA_REVISION = 0,
	FLASH_DMA_FIRST_DESC,
	FLASH_DMA_FIRST_DESC_EXT,
	FLASH_DMA_CTRL,
	FLASH_DMA_MODE,
	FLASH_DMA_STATUS,
	FLASH_DMA_INTERRUPT_DESC,
	FLASH_DMA_INTERRUPT_DESC_EXT,
	FLASH_DMA_ERROR_STATUS,
	FLASH_DMA_CURRENT_DESC,
	FLASH_DMA_CURRENT_DESC_EXT,
};

/* flash_dma registers v1 */
static const u16 flash_dma_regs_v1[] = {
	[FLASH_DMA_REVISION] = 0x00,
	[FLASH_DMA_FIRST_DESC] = 0x04,
	[FLASH_DMA_FIRST_DESC_EXT] = 0x08,
	[FLASH_DMA_CTRL] = 0x0c,
	[FLASH_DMA_MODE] = 0x10,
	[FLASH_DMA_STATUS] = 0x14,
	[FLASH_DMA_INTERRUPT_DESC] = 0x18,
	[FLASH_DMA_INTERRUPT_DESC_EXT] = 0x1c,
	[FLASH_DMA_ERROR_STATUS] = 0x20,
	[FLASH_DMA_CURRENT_DESC] = 0x24,
	[FLASH_DMA_CURRENT_DESC_EXT] = 0x28,
};

/* flash_dma registers v4 */
static const u16 flash_dma_regs_v4[] = {
	[FLASH_DMA_REVISION] = 0x00,
	[FLASH_DMA_FIRST_DESC] = 0x08,
	[FLASH_DMA_FIRST_DESC_EXT] = 0x0c,
	[FLASH_DMA_CTRL] = 0x10,
	[FLASH_DMA_MODE] = 0x14,
	[FLASH_DMA_STATUS] = 0x18,
	[FLASH_DMA_INTERRUPT_DESC] = 0x20,
	[FLASH_DMA_INTERRUPT_DESC_EXT] = 0x24,
	[FLASH_DMA_ERROR_STATUS] = 0x28,
	[FLASH_DMA_CURRENT_DESC] = 0x30,
	[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};

/* Controller feature flags */
enum {
	BRCMNAND_HAS_1K_SECTORS = BIT(0),
	BRCMNAND_HAS_PREFETCH = BIT(1),
	BRCMNAND_HAS_CACHE_MODE = BIT(2),
	BRCMNAND_HAS_WP = BIT(3),
};

struct brcmnand_controller {
	struct device *dev;
	struct nand_controller controller;
	void __iomem *nand_base;
	void __iomem *nand_fc; /* flash cache */
	void __iomem *flash_dma_base;
	unsigned int irq;
	unsigned int dma_irq;
	int nand_version;

	/* Some SoCs provide custom interrupt status register(s) */
	struct brcmnand_soc *soc;

	/* Some SoCs have a gateable clock for the controller */
	struct clk *clk;

	int cmd_pending;
	bool dma_pending;
	struct completion done;
	struct completion dma_done;

	/* List of NAND hosts (one for each chip-select) */
	struct list_head host_list;

	/* flash_dma */
	const u16 *flash_dma_offsets;
	struct brcm_nand_dma_desc *dma_desc;
	dma_addr_t dma_pa;

	/* in-memory cache of the FLASH_CACHE, used only for some commands */
	u8 flash_cache[FC_BYTES];

	/* Controller revision details */
	const u16 *reg_offsets;
	unsigned int reg_spacing; /* between CS1, CS2, ... regs */
	const u8 *cs_offsets; /* within each chip-select */
	const u8 *cs0_offsets; /* within CS0, if different */
	unsigned int max_block_size;
	const unsigned int *block_sizes;
	unsigned int max_page_size;
	const unsigned int *page_sizes;
	unsigned int max_oob;
	u32 features;

	/* for low-power standby/resume only */
	u32 nand_cs_nand_select;
	u32 nand_cs_nand_xor;
	u32 corr_stat_threshold;
	u32 flash_dma_mode;
	bool pio_poll_mode; /* poll instead of using IRQs (panic write) */
};

struct brcmnand_cfg {
	u64 device_size;
	unsigned int block_size;
	unsigned int page_size;
	unsigned int spare_area_size;
	unsigned int device_width;
	unsigned int col_adr_bytes;
	unsigned int blk_adr_bytes;
	unsigned int ful_adr_bytes;
	unsigned int sector_size_1k;
	unsigned int ecc_level;

	/* use for low-power standby/resume only */
	u32 acc_control;
	u32 config;
	u32 config_ext;
	u32 timing_1;
	u32 timing_2;
};

struct brcmnand_host {
	struct list_head node;

	struct nand_chip chip;
	struct platform_device *pdev;
	int cs;

	unsigned int last_cmd;
	unsigned int last_byte;
	u64 last_addr;
	struct brcmnand_cfg hwcfg;
	struct brcmnand_controller *ctrl;
};

enum brcmnand_reg {
	BRCMNAND_CMD_START = 0,
	BRCMNAND_CMD_EXT_ADDRESS,
	BRCMNAND_CMD_ADDRESS,
	BRCMNAND_INTFC_STATUS,
	BRCMNAND_CS_SELECT,
	BRCMNAND_CS_XOR,
	BRCMNAND_LL_OP,
	BRCMNAND_CS0_BASE,
	BRCMNAND_CS1_BASE,
	BRCMNAND_CORR_THRESHOLD,
	BRCMNAND_CORR_THRESHOLD_EXT,
	BRCMNAND_UNCORR_COUNT,
	BRCMNAND_CORR_COUNT,
	BRCMNAND_CORR_EXT_ADDR,
	BRCMNAND_CORR_ADDR,
	BRCMNAND_UNCORR_EXT_ADDR,
	BRCMNAND_UNCORR_ADDR,
	BRCMNAND_SEMAPHORE,
	BRCMNAND_ID,
	BRCMNAND_ID_EXT,
	BRCMNAND_LL_RDATA,
	BRCMNAND_OOB_READ_BASE,
	BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, in some cases */
	BRCMNAND_OOB_WRITE_BASE,
	BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, in some cases */
	BRCMNAND_FC_BASE,
};

/* BRCMNAND v4.0 */
static const u16 brcmnand_regs_v40[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x6c,
	[BRCMNAND_CS_SELECT] = 0x14,
	[BRCMNAND_CS_XOR] = 0x18,
	[BRCMNAND_LL_OP] = 0x178,
	[BRCMNAND_CS0_BASE] = 0x40,
	[BRCMNAND_CS1_BASE] = 0xd0,
	[BRCMNAND_CORR_THRESHOLD] = 0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
	[BRCMNAND_UNCORR_COUNT] = 0,
	[BRCMNAND_CORR_COUNT] = 0,
	[BRCMNAND_CORR_EXT_ADDR] = 0x70,
	[BRCMNAND_CORR_ADDR] = 0x74,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
	[BRCMNAND_UNCORR_ADDR] = 0x7c,
	[BRCMNAND_SEMAPHORE] = 0x58,
	[BRCMNAND_ID] = 0x60,
	[BRCMNAND_ID_EXT] = 0x64,
	[BRCMNAND_LL_RDATA] = 0x17c,
	[BRCMNAND_OOB_READ_BASE] = 0x20,
	[BRCMNAND_OOB_READ_10_BASE] = 0x130,
	[BRCMNAND_OOB_WRITE_BASE] = 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x200,
};

/* BRCMNAND v5.0 */
static const u16 brcmnand_regs_v50[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x6c,
	[BRCMNAND_CS_SELECT] = 0x14,
	[BRCMNAND_CS_XOR] = 0x18,
	[BRCMNAND_LL_OP] = 0x178,
	[BRCMNAND_CS0_BASE] = 0x40,
	[BRCMNAND_CS1_BASE] = 0xd0,
	[BRCMNAND_CORR_THRESHOLD] = 0x84,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0,
	[BRCMNAND_UNCORR_COUNT] = 0,
	[BRCMNAND_CORR_COUNT] = 0,
	[BRCMNAND_CORR_EXT_ADDR] = 0x70,
	[BRCMNAND_CORR_ADDR] = 0x74,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
	[BRCMNAND_UNCORR_ADDR] = 0x7c,
	[BRCMNAND_SEMAPHORE] = 0x58,
	[BRCMNAND_ID] = 0x60,
	[BRCMNAND_ID_EXT] = 0x64,
	[BRCMNAND_LL_RDATA] = 0x17c,
	[BRCMNAND_OOB_READ_BASE] = 0x20,
	[BRCMNAND_OOB_READ_10_BASE] = 0x130,
	[BRCMNAND_OOB_WRITE_BASE] = 0x30,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
	[BRCMNAND_FC_BASE] = 0x200,
};

/* BRCMNAND v6.0 */
static const u16 brcmnand_regs_v60[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x14,
	[BRCMNAND_CS_SELECT] = 0x18,
	[BRCMNAND_CS_XOR] = 0x1c,
	[BRCMNAND_LL_OP] = 0x20,
	[BRCMNAND_CS0_BASE] = 0x50,
	[BRCMNAND_CS1_BASE] = 0,
	[BRCMNAND_CORR_THRESHOLD] = 0xc0,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
	[BRCMNAND_UNCORR_COUNT] = 0xfc,
	[BRCMNAND_CORR_COUNT] = 0x100,
	[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
	[BRCMNAND_CORR_ADDR] = 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
	[BRCMNAND_UNCORR_ADDR] = 0x118,
	[BRCMNAND_SEMAPHORE] = 0x150,
	[BRCMNAND_ID] = 0x194,
	[BRCMNAND_ID_EXT] = 0x198,
	[BRCMNAND_LL_RDATA] = 0x19c,
	[BRCMNAND_OOB_READ_BASE] = 0x200,
	[BRCMNAND_OOB_READ_10_BASE] = 0,
	[BRCMNAND_OOB_WRITE_BASE] = 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x400,
};

/* BRCMNAND v7.1 */
static const u16 brcmnand_regs_v71[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x14,
	[BRCMNAND_CS_SELECT] = 0x18,
	[BRCMNAND_CS_XOR] = 0x1c,
	[BRCMNAND_LL_OP] = 0x20,
	[BRCMNAND_CS0_BASE] = 0x50,
	[BRCMNAND_CS1_BASE] = 0,
	[BRCMNAND_CORR_THRESHOLD] = 0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
	[BRCMNAND_UNCORR_COUNT] = 0xfc,
	[BRCMNAND_CORR_COUNT] = 0x100,
	[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
	[BRCMNAND_CORR_ADDR] = 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
	[BRCMNAND_UNCORR_ADDR] = 0x118,
	[BRCMNAND_SEMAPHORE] = 0x150,
	[BRCMNAND_ID] = 0x194,
	[BRCMNAND_ID_EXT] = 0x198,
	[BRCMNAND_LL_RDATA] = 0x19c,
	[BRCMNAND_OOB_READ_BASE] = 0x200,
	[BRCMNAND_OOB_READ_10_BASE] = 0,
	[BRCMNAND_OOB_WRITE_BASE] = 0x280,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x400,
};

/* BRCMNAND v7.2 */
static const u16 brcmnand_regs_v72[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x14,
	[BRCMNAND_CS_SELECT] = 0x18,
	[BRCMNAND_CS_XOR] = 0x1c,
	[BRCMNAND_LL_OP] = 0x20,
	[BRCMNAND_CS0_BASE] = 0x50,
	[BRCMNAND_CS1_BASE] = 0,
	[BRCMNAND_CORR_THRESHOLD] = 0xdc,
	[BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
	[BRCMNAND_UNCORR_COUNT] = 0xfc,
	[BRCMNAND_CORR_COUNT] = 0x100,
	[BRCMNAND_CORR_EXT_ADDR] = 0x10c,
	[BRCMNAND_CORR_ADDR] = 0x110,
	[BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
	[BRCMNAND_UNCORR_ADDR] = 0x118,
	[BRCMNAND_SEMAPHORE] = 0x150,
	[BRCMNAND_ID] = 0x194,
	[BRCMNAND_ID_EXT] = 0x198,
	[BRCMNAND_LL_RDATA] = 0x19c,
	[BRCMNAND_OOB_READ_BASE] = 0x200,
	[BRCMNAND_OOB_READ_10_BASE] = 0,
	[BRCMNAND_OOB_WRITE_BASE] = 0x400,
	[BRCMNAND_OOB_WRITE_10_BASE] = 0,
	[BRCMNAND_FC_BASE] = 0x600,
};

enum brcmnand_cs_reg {
	BRCMNAND_CS_CFG_EXT = 0,
	BRCMNAND_CS_CFG,
	BRCMNAND_CS_ACC_CONTROL,
	BRCMNAND_CS_TIMING1,
	BRCMNAND_CS_TIMING2,
};

/* BRCMNAND per chip-select registers, v7.1+ */
static const u8 brcmnand_cs_offsets_v71[] = {
	[BRCMNAND_CS_ACC_CONTROL] = 0x00,
	[BRCMNAND_CS_CFG_EXT] = 0x04,
	[BRCMNAND_CS_CFG] = 0x08,
	[BRCMNAND_CS_TIMING1] = 0x0c,
	[BRCMNAND_CS_TIMING2] = 0x10,
};

/* BRCMNAND per chip-select registers, pre-v7.1 */
static const u8 brcmnand_cs_offsets[] = {
	[BRCMNAND_CS_ACC_CONTROL] = 0x00,
	[BRCMNAND_CS_CFG_EXT] = 0x04,
	[BRCMNAND_CS_CFG] = 0x04,
	[BRCMNAND_CS_TIMING1] = 0x08,
	[BRCMNAND_CS_TIMING2] = 0x0c,
};

/* BRCMNAND per chip-select registers for CS0 on v5.0 and earlier */
static const u8 brcmnand_cs_offsets_cs0[] = {
	[BRCMNAND_CS_ACC_CONTROL] = 0x00,
	[BRCMNAND_CS_CFG_EXT] = 0x08,
	[BRCMNAND_CS_CFG] = 0x08,
	[BRCMNAND_CS_TIMING1] = 0x10,
	[BRCMNAND_CS_TIMING2] = 0x14,
};

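/*
 * Bitfields for the CONFIG and CONFIG_EXT registers. Pre-v7.1 controllers
 * pack page and block size into the single CONFIG register, while v7.1+
 * splits them out into CONFIG_EXT (see brcmnand_set_cfg()).
 */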
enum {
	CFG_BLK_ADR_BYTES_SHIFT = 8,
	CFG_COL_ADR_BYTES_SHIFT = 12,
	CFG_FUL_ADR_BYTES_SHIFT = 16,
	CFG_BUS_WIDTH_SHIFT = 23,
	CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT),
	CFG_DEVICE_SIZE_SHIFT = 24,

	/* Only for pre-v7.1 (with no CFG_EXT register) */
	CFG_PAGE_SIZE_SHIFT = 20,
	CFG_BLK_SIZE_SHIFT = 28,

	/* Only for v7.1+ (with CFG_EXT register) */
	CFG_EXT_PAGE_SIZE_SHIFT = 0,
	CFG_EXT_BLK_SIZE_SHIFT = 4,
};

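/* BRCMNAND_INTFC_STATUS register bitfields */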
enum {
	INTFC_FLASH_STATUS = GENMASK(7, 0),

	INTFC_ERASED = BIT(27),
	INTFC_OOB_VALID = BIT(28),
	INTFC_CACHE_VALID = BIT(29),
	INTFC_FLASH_READY = BIT(30),
	INTFC_CTLR_READY = BIT(31),
};

static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
{
	return brcmnand_readl(ctrl->nand_base + offs);
}

static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
				 u32 val)
{
	brcmnand_writel(val, ctrl->nand_base + offs);
}

static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
{
	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
	static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };

	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;

	/* Only support v4.0+? */
	if (ctrl->nand_version < 0x0400) {
		dev_err(ctrl->dev, "version %#x not supported\n",
			ctrl->nand_version);
		return -ENODEV;
	}

	/* Register offsets */
	if (ctrl->nand_version >= 0x0702)
		ctrl->reg_offsets = brcmnand_regs_v72;
	else if (ctrl->nand_version == 0x0701)
		ctrl->reg_offsets = brcmnand_regs_v71;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->reg_offsets = brcmnand_regs_v60;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->reg_offsets = brcmnand_regs_v50;
	else if (ctrl->nand_version >= 0x0400)
		ctrl->reg_offsets = brcmnand_regs_v40;

	/* Chip-select stride */
	if (ctrl->nand_version >= 0x0701)
		ctrl->reg_spacing = 0x14;
	else
		ctrl->reg_spacing = 0x10;

	/* Per chip-select registers */
	if (ctrl->nand_version >= 0x0701) {
		ctrl->cs_offsets = brcmnand_cs_offsets_v71;
	} else {
		ctrl->cs_offsets = brcmnand_cs_offsets;

		/* v5.0 and earlier has a different CS0 offset layout */
		if (ctrl->nand_version <= 0x0500)
			ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
	}

	/* Page / block sizes */
	if (ctrl->nand_version >= 0x0701) {
		/* >= v7.1 use nice power-of-2 values! */
		ctrl->max_page_size = 16 * 1024;
		ctrl->max_block_size = 2 * 1024 * 1024;
	} else {
		ctrl->page_sizes = page_sizes;
		if (ctrl->nand_version >= 0x0600)
			ctrl->block_sizes = block_sizes_v6;
		else
			ctrl->block_sizes = block_sizes_v4;

		if (ctrl->nand_version < 0x0400) {
			ctrl->max_page_size = 4096;
			ctrl->max_block_size = 512 * 1024;
		}
	}

	/* Maximum spare area sector size (per 512B) */
	if (ctrl->nand_version == 0x0702)
		ctrl->max_oob = 128;
	else if (ctrl->nand_version >= 0x0600)
		ctrl->max_oob = 64;
	else if (ctrl->nand_version >= 0x0500)
		ctrl->max_oob = 32;
	else
		ctrl->max_oob = 16;

	/* Prefetch is supported on v6.0+, except v6.1 */
	if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
		ctrl->features |= BRCMNAND_HAS_PREFETCH;

	/*
	 * v6.x has cache mode, but it's implemented differently. Ignore it for
	 * now.
	 */
	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_CACHE_MODE;

	if (ctrl->nand_version >= 0x0500)
		ctrl->features |= BRCMNAND_HAS_1K_SECTORS;

	if (ctrl->nand_version >= 0x0700)
		ctrl->features |= BRCMNAND_HAS_WP;
	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
		ctrl->features |= BRCMNAND_HAS_WP;

	return 0;
}

static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
{
	/* flash_dma register offsets */
	if (ctrl->nand_version >= 0x0703)
		ctrl->flash_dma_offsets = flash_dma_regs_v4;
	else
		ctrl->flash_dma_offsets = flash_dma_regs_v1;
}

static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		return nand_readreg(ctrl, offs);
	else
		return 0;
}

static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
				      enum brcmnand_reg reg, u32 val)
{
	u16 offs = ctrl->reg_offsets[reg];

	if (offs)
		nand_writereg(ctrl, offs, val);
}

static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
				    enum brcmnand_reg reg, u32 mask,
				    unsigned int shift, u32 val)
{
	u32 tmp = brcmnand_read_reg(ctrl, reg);

	tmp &= ~mask;
	tmp |= val << shift;
	brcmnand_write_reg(ctrl, reg, tmp);
}

static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
{
	return __raw_readl(ctrl->nand_fc + word * 4);
}

static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
				     int word, u32 val)
{
	__raw_writel(val, ctrl->nand_fc + word * 4);
}

static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
{
	/* Clear error addresses */
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
}

static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_UNCORR_EXT_ADDR)
					     & 0xffff) << 32);

	return err_addr;
}

static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
{
	u64 err_addr;

	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
					     BRCMNAND_CORR_EXT_ADDR)
					     & 0xffff) << 32);

	return err_addr;
}

static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
			   (host->cs << 16) | ((addr >> 32) & 0xffff));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
			   lower_32_bits(addr));
	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
}

static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
				     enum brcmnand_cs_reg reg)
{
	u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
	u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
	u8 cs_offs;

	if (cs == 0 && ctrl->cs0_offsets)
		cs_offs = ctrl->cs0_offsets[reg];
	else
		cs_offs = ctrl->cs_offsets[reg];

	if (cs && offs_cs1)
		return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;

	return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
}

static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0600)
		return 1;
	return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
}

static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int shift = 0, bits;
	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
	int cs = host->cs;

	if (ctrl->nand_version == 0x0702)
		bits = 7;
	else if (ctrl->nand_version >= 0x0600)
		bits = 6;
	else if (ctrl->nand_version >= 0x0500)
		bits = 5;
	else
		bits = 4;

	if (ctrl->nand_version >= 0x0702) {
		if (cs >= 4)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 4) * bits;
	} else if (ctrl->nand_version >= 0x0600) {
		if (cs >= 5)
			reg = BRCMNAND_CORR_THRESHOLD_EXT;
		shift = (cs % 5) * bits;
	}
	brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
}

static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version < 0x0602)
		return 24;
	return 0;
}

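/***********************************************************************
 * NAND ACC CONTROL bitfield
 *
 * Some bits have remained constant throughout hardware revision, while
 * others have shifted around.
 ***********************************************************************/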
enum {
	/* cache mode (v7.0+) */
	ACC_CONTROL_CACHE_MODE = BIT(22),

	/* prefetch (v6.0+, except v6.1) */
	ACC_CONTROL_PREFETCH = BIT(23),

	ACC_CONTROL_PAGE_HIT = BIT(24),
	ACC_CONTROL_WR_PREEMPT = BIT(25),
	ACC_CONTROL_PARTIAL_PAGE = BIT(26),
	ACC_CONTROL_RD_ERASED = BIT(27),
	ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
	ACC_CONTROL_WR_ECC = BIT(30),
	ACC_CONTROL_RD_ECC = BIT(31),
};

static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version == 0x0702)
		return GENMASK(7, 0);
	else if (ctrl->nand_version >= 0x0600)
		return GENMASK(6, 0);
	else
		return GENMASK(5, 0);
}

#define NAND_ACC_CONTROL_ECC_SHIFT	16
#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13

static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;

	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;

	/* v7.2 includes additional ECC levels */
	if (ctrl->nand_version >= 0x0702)
		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;

	return mask;
}

static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	u32 acc_control = nand_readreg(ctrl, offs);
	u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;

	if (en) {
		acc_control |= ecc_flags; /* enable RD/WR ECC */
		acc_control |= host->hwcfg.ecc_level
			       << NAND_ACC_CONTROL_ECC_SHIFT;
	} else {
		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
	}

	nand_writereg(ctrl, offs, acc_control);
}

static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
	if (ctrl->nand_version >= 0x0702)
		return 9;
	else if (ctrl->nand_version >= 0x0600)
		return 7;
	else if (ctrl->nand_version >= 0x0500)
		return 6;
	else
		return -1;
}

static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);

	if (shift < 0)
		return 0;

	return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
}

static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int shift = brcmnand_sector_1k_shift(ctrl);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u32 tmp;

	if (shift < 0)
		return;

	tmp = nand_readreg(ctrl, acc_control_offs);
	tmp &= ~(1 << shift);
	tmp |= (!!val) << shift;
	nand_writereg(ctrl, acc_control_offs, tmp);
}

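/***********************************************************************
 * CS_NAND_SELECT
 ***********************************************************************/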
enum {
	CS_SELECT_NAND_WP = BIT(29),
	CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
};

static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
				    u32 mask, u32 expected_val,
				    unsigned long timeout_ms)
{
	unsigned long limit;
	u32 val;

	if (!timeout_ms)
		timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;

	limit = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
		if ((val & mask) == expected_val)
			return 0;

		cpu_relax();
	} while (time_after(limit, jiffies));

	dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
		 expected_val, val & mask);

	return -ETIMEDOUT;
}

static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
	u32 val = en ? CS_SELECT_NAND_WP : 0;

	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
}

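/***********************************************************************
 * Flash DMA
 ***********************************************************************/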
static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
{
	return ctrl->flash_dma_base;
}

static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
{
	if (ctrl->pio_poll_mode)
		return;

	if (has_flash_dma(ctrl)) {
		ctrl->flash_dma_base = NULL;
		disable_irq(ctrl->dma_irq);
	}

	disable_irq(ctrl->irq);
	ctrl->pio_poll_mode = true;
}

static inline bool flash_dma_buf_ok(const void *buf)
{
	return buf && !is_vmalloc_addr(buf) &&
		likely(IS_ALIGNED((uintptr_t)buf, 4));
}

static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
				    enum flash_dma_reg dma_reg, u32 val)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	brcmnand_writel(val, ctrl->flash_dma_base + offs);
}

static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
				  enum flash_dma_reg dma_reg)
{
	u16 offs = ctrl->flash_dma_offsets[dma_reg];

	return brcmnand_readl(ctrl->flash_dma_base + offs);
}

/* Low-level operation types: command, address, write, or read */
enum brcmnand_llop_type {
	LL_OP_CMD,
	LL_OP_ADDR,
	LL_OP_WR,
	LL_OP_RD,
};

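/***********************************************************************
 * Internal support functions
 ***********************************************************************/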
static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
				  struct brcmnand_cfg *cfg)
{
	if (ctrl->nand_version <= 0x0701)
		return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15;
	else
		return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
			cfg->ecc_level == 15) ||
			(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}

static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = (section * sas) + 6;
	oobregion->length = 3;

	return 0;
}

static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
					   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors * 2)
		return -ERANGE;

	oobregion->offset = (section / 2) * sas;

	if (section & 1) {
		oobregion->offset += 9;
		oobregion->length = 7;
	} else {
		oobregion->length = 6;

		/* First sector of each page may have BBI */
		if (!section) {
			/*
			 * Small-page NAND use byte 6 for BBI while large-page
			 * NAND use byte 0.
			 */
			if (cfg->page_size > 512)
				oobregion->offset++;
			oobregion->length--;
		}
	}

	return 0;
}

static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
	.ecc = brcmnand_hamming_ooblayout_ecc,
	.free = brcmnand_hamming_ooblayout_free,
};

static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
	oobregion->length = chip->ecc.bytes;

	return 0;
}

static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;
	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);

	if (section >= sectors)
		return -ERANGE;

	if (sas <= chip->ecc.bytes)
		return 0;

	oobregion->offset = section * sas;
	oobregion->length = sas - chip->ecc.bytes;

	if (!section) {
		oobregion->offset++;
		oobregion->length--;
	}

	return 0;
}

static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_cfg *cfg = &host->hwcfg;
	int sas = cfg->spare_area_size << cfg->sector_size_1k;

	if (section > 1 || sas - chip->ecc.bytes < 6 ||
	    (section && sas - chip->ecc.bytes == 6))
		return -ERANGE;

	if (!section) {
		oobregion->offset = 0;
		oobregion->length = 5;
	} else {
		oobregion->offset = 6;
		oobregion->length = sas - chip->ecc.bytes - 6;
	}

	return 0;
}

static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_lp,
};

static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
	.ecc = brcmnand_bch_ooblayout_ecc,
	.free = brcmnand_bch_ooblayout_free_sp,
};

/*
 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given the NAND ECC
 * configuration
 */
static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
{
	struct brcmnand_cfg *p = &host->hwcfg;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
	unsigned int ecc_level = p->ecc_level;
	int sas = p->spare_area_size << p->sector_size_1k;
	int sectors = p->page_size / (512 << p->sector_size_1k);

	if (p->sector_size_1k)
		ecc_level <<= 1;

	if (is_hamming_ecc(host->ctrl, p)) {
		ecc->bytes = 3 * sectors;
		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
		return 0;
	}

	/*
	 * CONTROLLER_VERSION:
	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
	 * But we will just be conservative.
	 */
	ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
	if (p->page_size == 512)
		mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
	else
		mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);

	if (ecc->bytes >= sas) {
		dev_err(&host->pdev->dev,
			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
			ecc->bytes, sas);
		return -EINVAL;
	}

	return 0;
}

static void brcmnand_wp(struct mtd_info *mtd, int wp)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;

	if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
		static int old_wp = -1;
		int ret;

		if (old_wp != wp) {
			dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
			old_wp = wp;
		}

		/*
		 * make sure ctrl/flash ready before and after
		 * changing state of #WP pin
		 */
		ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
					       NAND_STATUS_READY,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY, 0);
		if (ret)
			return;

		brcmnand_set_wp(ctrl, wp);
		nand_status_op(chip, NULL);

		/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
		ret = bcmnand_ctrl_poll_status(ctrl,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       NAND_STATUS_WP,
					       NAND_CTRL_RDY |
					       NAND_STATUS_READY |
					       (wp ? 0 : NAND_STATUS_WP), 0);

		if (ret)
			dev_err_ratelimited(&host->pdev->dev,
					    "nand #WP expected %s\n",
					    wp ? "on" : "off");
	}
}

/* Helper functions for reading and writing OOB registers */
static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];

	if (offs >= ctrl->max_oob)
		return 0x77;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
}

static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
				 u32 data)
{
	u16 offset0, offset10, reg_offs;

	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];

	if (offs >= ctrl->max_oob)
		return;

	if (offs >= 16 && offset10)
		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
	else
		reg_offs = offset0 + (offs & ~0x03);

	nand_writereg(ctrl, reg_offs, data);
}

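/*
 * read_oob_from_regs - read data from OOB registers
 * @ctrl: NAND controller
 * @i: sub-page sector index
 * @oob: buffer to read to
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */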
static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
			      int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j++)
		oob[j] = oob_reg_read(ctrl, j);
	return tbytes;
}

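/*
 * write_oob_to_regs - write data to OOB registers
 * @i: sub-page sector index
 * @oob: buffer to write from
 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
 */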
static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
			     const u8 *oob, int sas, int sector_1k)
{
	int tbytes = sas << sector_1k;
	int j;

	/* Adjust OOB values for 1K sector size */
	if (sector_1k && (i & 0x01))
		tbytes = max(0, tbytes - (int)ctrl->max_oob);
	tbytes = min_t(int, tbytes, ctrl->max_oob);

	for (j = 0; j < tbytes; j += 4)
		oob_reg_write(ctrl, j,
			      (oob[j + 0] << 24) |
			      (oob[j + 1] << 16) |
			      (oob[j + 2] << 8) |
			      (oob[j + 3] << 0));
	return tbytes;
}

static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	/* Discard all NAND_CTLRDY interrupts during DMA */
	if (ctrl->dma_pending)
		return IRQ_HANDLED;

	complete(&ctrl->done);
	return IRQ_HANDLED;
}

/* Handle SoC-specific interrupt hardware */
static irqreturn_t brcmnand_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	if (ctrl->soc->ctlrdy_ack(ctrl->soc))
		return brcmnand_ctlrdy_irq(irq, data);

	return IRQ_NONE;
}

static irqreturn_t brcmnand_dma_irq(int irq, void *data)
{
	struct brcmnand_controller *ctrl = data;

	complete(&ctrl->dma_done);

	return IRQ_HANDLED;
}

static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	int ret;
	u64 cmd_addr;

	cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);

	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);

	BUG_ON(ctrl->cmd_pending != 0);
	ctrl->cmd_pending = cmd;

	ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
	WARN_ON(ret);

	mb(); /* flush previous writes */
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
			   cmd << brcmnand_cmd_shift(ctrl));
}

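/***********************************************************************
 * NAND MTD API: read/program/erase
 ***********************************************************************/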
static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
			      unsigned int ctrl)
{
	/* intentionally left blank */
}

static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	struct mtd_info *mtd = nand_to_mtd(chip);
	bool err = false;
	int sts;

	if (mtd->oops_panic_write) {
		/* switch to interrupt polling and PIO mode */
		disable_ctrl_irqs(ctrl);
		sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
					       NAND_CTRL_RDY, 0);
		err = (sts < 0) ? true : false;
	} else {
		unsigned long timeo = msecs_to_jiffies(
						NAND_POLL_STATUS_TIMEOUT_MS);
		/* wait for completion interrupt */
		sts = wait_for_completion_timeout(&ctrl->done, timeo);
		err = (sts <= 0) ? true : false;
	}

	return err;
}

static int brcmnand_waitfunc(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	bool err = false;

	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
	if (ctrl->cmd_pending)
		err = brcmstb_nand_wait_for_completion(chip);

	if (err) {
		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
					>> brcmnand_cmd_shift(ctrl);

		dev_err_ratelimited(ctrl->dev,
				    "timeout waiting for command %#02x\n", cmd);
		dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
				    brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
	}
	ctrl->cmd_pending = 0;
	return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
				 INTFC_FLASH_STATUS;
}

enum {
	LLOP_RE = BIT(16),
	LLOP_WE = BIT(17),
	LLOP_ALE = BIT(18),
	LLOP_CLE = BIT(19),
	LLOP_RETURN_IDLE = BIT(31),

	LLOP_DATA_MASK = GENMASK(15, 0),
};

static int brcmnand_low_level_op(struct brcmnand_host *host,
				 enum brcmnand_llop_type type, u32 data,
				 bool last_op)
{
	struct nand_chip *chip = &host->chip;
	struct brcmnand_controller *ctrl = host->ctrl;
	u32 tmp;

	tmp = data & LLOP_DATA_MASK;
	switch (type) {
	case LL_OP_CMD:
		tmp |= LLOP_WE | LLOP_CLE;
		break;
	case LL_OP_ADDR:
		/* WE | ALE */
		tmp |= LLOP_WE | LLOP_ALE;
		break;
	case LL_OP_WR:
		/* WE */
		tmp |= LLOP_WE;
		break;
	case LL_OP_RD:
		/* RE */
		tmp |= LLOP_RE;
		break;
	}
	if (last_op)
		/* RETURN_IDLE */
		tmp |= LLOP_RETURN_IDLE;

	dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);

	brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
	(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);

	brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
	return brcmnand_waitfunc(chip);
}

static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
			     int column, int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 addr = (u64)page_addr << chip->page_shift;
	int native_cmd = 0;

	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
	    command == NAND_CMD_RNDOUT)
		addr = (u64)column;
	/* Avoid propagating a negative, don't-care address */
	else if (page_addr < 0)
		addr = 0;

	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
		(unsigned long long)addr);

	host->last_cmd = command;
	host->last_byte = 0;
	host->last_addr = addr;

	switch (command) {
	case NAND_CMD_RESET:
		native_cmd = CMD_FLASH_RESET;
		break;
	case NAND_CMD_STATUS:
		native_cmd = CMD_STATUS_READ;
		break;
	case NAND_CMD_READID:
		native_cmd = CMD_DEVICE_ID_READ;
		break;
	case NAND_CMD_READOOB:
		native_cmd = CMD_SPARE_AREA_READ;
		break;
	case NAND_CMD_ERASE1:
		native_cmd = CMD_BLOCK_ERASE;
		brcmnand_wp(mtd, 0);
		break;
	case NAND_CMD_PARAM:
		native_cmd = CMD_PARAMETER_READ;
		break;
	case NAND_CMD_SET_FEATURES:
	case NAND_CMD_GET_FEATURES:
		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
		break;
	case NAND_CMD_RNDOUT:
		native_cmd = CMD_PARAMETER_CHANGE_COL;
		addr &= ~((u64)(FC_BYTES - 1));
		/*
		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
		 * NB: hwcfg.sector_size_1k may not be initialized yet
		 */
		if (brcmnand_get_sector_size_1k(host)) {
			host->hwcfg.sector_size_1k =
				brcmnand_get_sector_size_1k(host);
			brcmnand_set_sector_size_1k(host, 0);
		}
		break;
	}

	if (!native_cmd)
		return;

	brcmnand_set_cmd_addr(mtd, addr);
	brcmnand_send_cmd(host, native_cmd);
	brcmnand_waitfunc(chip);

	if (native_cmd == CMD_PARAMETER_READ ||
	    native_cmd == CMD_PARAMETER_CHANGE_COL) {
		/* Copy flash cache word-wise */
		u32 *flash_cache = (u32 *)ctrl->flash_cache;
		int i;

		brcmnand_soc_data_bus_prepare(ctrl->soc, true);

		/*
		 * Must cache the FLASH_CACHE now, since changes in
		 * SECTOR_SIZE_1K may invalidate it
		 */
		for (i = 0; i < FC_WORDS; i++)
			/*
			 * Flash cache is big endian for parameter pages, at
			 * least on STB SoCs
			 */
			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));

		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);

		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
		if (host->hwcfg.sector_size_1k)
			brcmnand_set_sector_size_1k(host,
						    host->hwcfg.sector_size_1k);
	}

	/* Re-enable protection is necessary only after erase */
	if (command == NAND_CMD_ERASE1)
		brcmnand_wp(mtd, 1);
}

static uint8_t brcmnand_read_byte(struct nand_chip *chip)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	uint8_t ret = 0;
	int addr, offs;

	switch (host->last_cmd) {
	case NAND_CMD_READID:
		if (host->last_byte < 4)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
				(24 - (host->last_byte << 3));
		else if (host->last_byte < 8)
			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
				(56 - (host->last_byte << 3));
		break;

	case NAND_CMD_READOOB:
		ret = oob_reg_read(ctrl, host->last_byte);
		break;

	case NAND_CMD_STATUS:
		ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
					INTFC_FLASH_STATUS;
		if (wp_on) /* hide WP status */
			ret |= NAND_STATUS_WP;
		break;

	case NAND_CMD_PARAM:
	case NAND_CMD_RNDOUT:
		addr = host->last_addr + host->last_byte;
		offs = addr & (FC_BYTES - 1);

		/* At FC_BYTES boundary, switch to next column */
		if (host->last_byte > 0 && offs == 0)
			nand_change_read_column_op(chip, addr, NULL, 0, false);

		ret = ctrl->flash_cache[offs];
		break;
	case NAND_CMD_GET_FEATURES:
		if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
			ret = 0;
		} else {
			bool last = host->last_byte ==
				ONFI_SUBFEATURE_PARAM_LEN - 1;
			brcmnand_low_level_op(host, LL_OP_RD, 0, last);
			ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
		}
	}

	dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
	host->last_byte++;

	return ret;
}

static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++, buf++)
		*buf = brcmnand_read_byte(chip);
}

static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
			       int len)
{
	int i;
	struct brcmnand_host *host = nand_get_controller_data(chip);

	switch (host->last_cmd) {
	case NAND_CMD_SET_FEATURES:
		for (i = 0; i < len; i++)
			brcmnand_low_level_op(host, LL_OP_WR, buf[i],
					      (i + 1) == len);
		break;
	default:
		BUG();
		break;
	}
}

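/*
 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
 * following ahead of time:
 *  - Is this descriptor the beginning or end of a linked list?
 *  - What is the (DMA) address of the next descriptor in the linked list?
 */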
static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
				  struct brcm_nand_dma_desc *desc, u64 addr,
				  dma_addr_t buf, u32 len, u8 dma_cmd,
				  bool begin, bool end,
				  dma_addr_t next_desc)
{
	memset(desc, 0, sizeof(*desc));
	/* Descriptors are written in native byte order (wordwise) */
	desc->next_desc = lower_32_bits(next_desc);
	desc->next_desc_ext = upper_32_bits(next_desc);
	desc->cmd_irq = (dma_cmd << 24) |
		(end ? (0x03 << 8) : 0) | /* IRQ | STOP */
		(!!begin) | ((!!end) << 1); /* head, tail */
#ifdef CONFIG_CPU_BIG_ENDIAN
	desc->cmd_irq |= 0x01 << 12;
#endif
	desc->dram_addr = lower_32_bits(buf);
	desc->dram_addr_ext = upper_32_bits(buf);
	desc->tfr_len = len;
	desc->total_len = len;
	desc->flash_addr = lower_32_bits(addr);
	desc->flash_addr_ext = upper_32_bits(addr);
	desc->cs = host->cs;
	desc->status_valid = 0x01;
	return 0;
}

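/*
 * Kick the FLASH_DMA engine, with a given DMA descriptor
 */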
static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned long timeo = msecs_to_jiffies(100);

	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);

	/* Start FLASH_DMA engine */
	ctrl->dma_pending = true;
	mb(); /* flush previous writes */
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */

	if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
		dev_err(ctrl->dev,
			"timeout waiting for DMA; status %#x, error status %#x\n",
			flash_dma_readl(ctrl, FLASH_DMA_STATUS),
			flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
	}
	ctrl->dma_pending = false;
	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
}

static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
			      u32 len, u8 dma_cmd)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	dma_addr_t buf_pa;
	int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
	if (dma_mapping_error(ctrl->dev, buf_pa)) {
		dev_err(ctrl->dev, "unable to map buffer for DMA\n");
		return -ENOMEM;
	}

	brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
			       dma_cmd, true, true, 0);

	brcmnand_dma_run(host, ctrl->dma_pa);

	dma_unmap_single(ctrl->dev, buf_pa, len, dir);

	if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
		return -EBADMSG;
	else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
		return -EUCLEAN;

	return 0;
}

/*
 * Assumes proper CS is already set
 */
static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
				u64 addr, unsigned int trans, u32 *buf,
				u8 *oob, u64 *err_addr)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	int i, j, ret = 0;

	brcmnand_clear_ecc_addr(ctrl);

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		brcmnand_set_cmd_addr(mtd, addr);
		/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
		brcmnand_send_cmd(host, CMD_PAGE_READ);
		brcmnand_waitfunc(chip);

		if (likely(buf)) {
			brcmnand_soc_data_bus_prepare(ctrl->soc, false);

			for (j = 0; j < FC_WORDS; j++, buf++)
				*buf = brcmnand_read_fc(ctrl, j);

			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
		}

		if (oob)
			oob += read_oob_from_regs(ctrl, i, oob,
						  mtd->oobsize / trans,
						  host->hwcfg.sector_size_1k);

		if (!ret) {
			*err_addr = brcmnand_get_uncorrecc_addr(ctrl);

			if (*err_addr)
				ret = -EBADMSG;
		}

		if (!ret) {
			*err_addr = brcmnand_get_correcc_addr(ctrl);

			if (*err_addr)
				ret = -EUCLEAN;
		}
	}

	return ret;
}

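/*
 * Check a page to see if it is erased (with bitflips) after an uncorrectable
 * ECC error.
 *
 * Because the HW ECC signals an ECC error if an erased page has even a single
 * bitflip, we must check each ECC error to see if it is actually an erased
 * page with bitflips, not a truly corrupted page.
 *
 * On a real error, return a negative error code (-EBADMSG for ECC error), and
 * buf will contain raw data.
 * Otherwise, return the maximum number of bitflips-per-ECC-sector to the
 * caller.
 */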
static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
					   struct nand_chip *chip, void *buf,
					   u64 addr)
{
	int i, sas;
	void *oob = chip->oob_poi;
	int bitflips = 0;
	int page = addr >> chip->page_shift;
	int ret;
	void *ecc_chunk;

	if (!buf)
		buf = nand_get_data_buf(chip);

	sas = mtd->oobsize / chip->ecc.steps;

	/* read without ecc for verification */
	ret = chip->ecc.read_page_raw(chip, buf, true, page);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
		ecc_chunk = buf + chip->ecc.size * i;
		ret = nand_check_erased_ecc_chunk(ecc_chunk,
						  chip->ecc.size,
						  oob, sas, NULL, 0,
						  chip->ecc.strength);
		if (ret < 0)
			return ret;

		bitflips = max(bitflips, ret);
	}

	return bitflips;
}

static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
			 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	u64 err_addr = 0;
	int err;
	bool retry = true;

	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);

try_dmaread:
	brcmnand_clear_ecc_addr(ctrl);

	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
		err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
					 CMD_PAGE_READ);
		if (err) {
			if (mtd_is_bitflip_or_eccerr(err))
				err_addr = addr;
			else
				return -EIO;
		}
	} else {
		if (oob)
			memset(oob, 0x99, mtd->oobsize);

		err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
					   oob, &err_addr);
	}

	if (mtd_is_eccerr(err)) {
		/*
		 * On controller version 7.0 and 7.1, a DMA read after a
		 * prior PIO read that reported an uncorrectable error can
		 * carry that error over; it is cleared only on a subsequent
		 * DMA read, so retry once to clear a possible false error.
		 */
		if ((ctrl->nand_version == 0x0700) ||
		    (ctrl->nand_version == 0x0701)) {
			if (retry) {
				retry = false;
				goto try_dmaread;
			}
		}

		/*
		 * Controller version 7.2 has hw encoder to detect erased page
		 * bitflips, apply different policy for older controllers.
		 */
		if (ctrl->nand_version < 0x0702) {
			err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
							      addr);
			/* erased page bitflips corrected */
			if (err >= 0)
				return err;
		}

		dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
			(unsigned long long)err_addr);
		mtd->ecc_stats.failed++;
		/* NAND layer expects zero on ECC errors */
		return 0;
	}

	if (mtd_is_bitflip(err)) {
		unsigned int corrected = brcmnand_count_corrected(ctrl);

		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
			(unsigned long long)err_addr);
		mtd->ecc_stats.corrected += corrected;

		/* Always exceed the software-imposed threshold */
		return max(mtd->bitflip_threshold, corrected);
	}

	return 0;
}

static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
			      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;

	nand_read_page_op(chip, page, 0, NULL, 0);

	return brcmnand_read(mtd, chip, host->last_addr,
			     mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}

static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
	int ret;

	nand_read_page_op(chip, page, 0, NULL, 0);

	brcmnand_set_ecc_enabled(host, 0);
	ret = brcmnand_read(mtd, chip, host->last_addr,
			    mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
	brcmnand_set_ecc_enabled(host, 1);
	return ret;
}

static int brcmnand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
			     mtd->writesize >> FC_SHIFT,
			     NULL, (u8 *)chip->oob_poi);
}

static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);

	brcmnand_set_ecc_enabled(host, 0);
	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
		      mtd->writesize >> FC_SHIFT,
		      NULL, (u8 *)chip->oob_poi);
	brcmnand_set_ecc_enabled(host, 1);
	return 0;
}

static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
			  u64 addr, const u32 *buf, u8 *oob)
{
	struct brcmnand_host *host = nand_get_controller_data(chip);
	struct brcmnand_controller *ctrl = host->ctrl;
	unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
	int status, ret = 0;

	dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);

	if (unlikely((unsigned long)buf & 0x03)) {
		dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
		buf = (u32 *)((unsigned long)buf & ~0x03);
	}

	brcmnand_wp(mtd, 0);

	for (i = 0; i < ctrl->max_oob; i += 4)
		oob_reg_write(ctrl, i, 0xffffffff);

	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
		if (brcmnand_dma_trans(host, addr, (u32 *)buf,
				       mtd->writesize, CMD_PROGRAM_PAGE))
			ret = -EIO;
		goto out;
	}

	for (i = 0; i < trans; i++, addr += FC_BYTES) {
		/* full address MUST be set before populating FC */
		brcmnand_set_cmd_addr(mtd, addr);

		if (buf) {
			brcmnand_soc_data_bus_prepare(ctrl->soc, false);

			for (j = 0; j < FC_WORDS; j++, buf++)
				brcmnand_write_fc(ctrl, j, *buf);

			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
		} else if (oob) {
			for (j = 0; j < FC_WORDS; j++)
				brcmnand_write_fc(ctrl, j, 0xffffffff);
		}

		if (oob) {
			oob += write_oob_to_regs(ctrl, i, oob,
						 mtd->oobsize / trans,
						 host->hwcfg.sector_size_1k);
		}

		/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
		status = brcmnand_waitfunc(chip);

		if (status & NAND_STATUS_FAIL) {
			dev_info(ctrl->dev, "program failed at %llx\n",
				 (unsigned long long)addr);
			ret = -EIO;
			goto out;
		}
	}
out:
	brcmnand_wp(mtd, 1);
	return ret;
}

static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
			       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	void *oob = oob_required ? chip->oob_poi : NULL;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);

	return nand_prog_page_end_op(chip);
}

static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	void *oob = oob_required ? chip->oob_poi : NULL;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	brcmnand_set_ecc_enabled(host, 0);
	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
	brcmnand_set_ecc_enabled(host, 1);

	return nand_prog_page_end_op(chip);
}

static int brcmnand_write_oob(struct nand_chip *chip, int page)
{
	return brcmnand_write(nand_to_mtd(chip), chip,
			      (u64)page << chip->page_shift, NULL,
			      chip->oob_poi);
}

static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	int ret;

	brcmnand_set_ecc_enabled(host, 0);
	ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
			     (u8 *)chip->oob_poi);
	brcmnand_set_ecc_enabled(host, 1);

	return ret;
}

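/***********************************************************************
 * Per-CS setup (1 NAND device)
 ***********************************************************************/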
2072static int brcmnand_set_cfg(struct brcmnand_host *host,
2073 struct brcmnand_cfg *cfg)
2074{
2075 struct brcmnand_controller *ctrl = host->ctrl;
2076 struct nand_chip *chip = &host->chip;
2077 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2078 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2079 BRCMNAND_CS_CFG_EXT);
2080 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2081 BRCMNAND_CS_ACC_CONTROL);
2082 u8 block_size = 0, page_size = 0, device_size = 0;
2083 u32 tmp;
2084
2085 if (ctrl->block_sizes) {
2086 int i, found;
2087
2088 for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
2089 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
2090 block_size = i;
2091 found = 1;
2092 }
2093 if (!found) {
2094 dev_warn(ctrl->dev, "invalid block size %u\n",
2095 cfg->block_size);
2096 return -EINVAL;
2097 }
2098 } else {
2099 block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
2100 }
2101
2102 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
2103 cfg->block_size > ctrl->max_block_size)) {
2104 dev_warn(ctrl->dev, "invalid block size %u\n",
2105 cfg->block_size);
2106 block_size = 0;
2107 }
2108
2109 if (ctrl->page_sizes) {
2110 int i, found;
2111
2112 for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
2113 if (ctrl->page_sizes[i] == cfg->page_size) {
2114 page_size = i;
2115 found = 1;
2116 }
2117 if (!found) {
2118 dev_warn(ctrl->dev, "invalid page size %u\n",
2119 cfg->page_size);
2120 return -EINVAL;
2121 }
2122 } else {
2123 page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
2124 }
2125
2126 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
2127 cfg->page_size > ctrl->max_page_size)) {
2128 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
2129 return -EINVAL;
2130 }
2131
2132 if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
2133 dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
2134 (unsigned long long)cfg->device_size);
2135 return -EINVAL;
2136 }
2137 device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
2138
2139 tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
2140 (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
2141 (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
2142 (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
2143 (device_size << CFG_DEVICE_SIZE_SHIFT);
2144 if (cfg_offs == cfg_ext_offs) {
2145 tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
2146 (block_size << CFG_BLK_SIZE_SHIFT);
2147 nand_writereg(ctrl, cfg_offs, tmp);
2148 } else {
2149 nand_writereg(ctrl, cfg_offs, tmp);
2150 tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
2151 (block_size << CFG_EXT_BLK_SIZE_SHIFT);
2152 nand_writereg(ctrl, cfg_ext_offs, tmp);
2153 }
2154
2155 tmp = nand_readreg(ctrl, acc_control_offs);
2156 tmp &= ~brcmnand_ecc_level_mask(ctrl);
2157 tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
2158 tmp &= ~brcmnand_spare_area_mask(ctrl);
2159 tmp |= cfg->spare_area_size;
2160 nand_writereg(ctrl, acc_control_offs, tmp);
2161
2162 brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
2163
2164
	brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));

	return 0;
}

static void brcmnand_print_cfg(struct brcmnand_host *host,
			       char *buf, struct brcmnand_cfg *cfg)
{
	buf += sprintf(buf,
		"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
		(unsigned long long)cfg->device_size >> 20,
		cfg->block_size >> 10,
		cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
		cfg->page_size >= 1024 ? "KiB" : "B",
		cfg->spare_area_size, cfg->device_width);

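	/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */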
	if (is_hamming_ecc(host->ctrl, cfg))
		sprintf(buf, ", Hamming ECC");
	else if (cfg->sector_size_1k)
		sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
	else
		sprintf(buf, ", BCH-%u", cfg->ecc_level);
}

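/*
 * Minimum number of bytes to address a page. Calculated as:
 *     roundup(log2(size / page-size) / 8)
 *
 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
 *     OK because many other things will break if 'size' is irregular...
 */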
static inline int get_blk_adr_bytes(u64 size, u32 writesize)
{
	return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
}

static int brcmnand_setup_dev(struct brcmnand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_chip *chip = &host->chip;
	struct brcmnand_controller *ctrl = host->ctrl;
	struct brcmnand_cfg *cfg = &host->hwcfg;
	char msg[128];
	u32 offs, tmp, oob_sector;
	int ret;

	memset(cfg, 0, sizeof(*cfg));

	ret = of_property_read_u32(nand_get_flash_node(chip),
				   "brcm,nand-oob-sector-size",
				   &oob_sector);
	if (ret) {
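		/* Use detected size */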
		cfg->spare_area_size = mtd->oobsize /
					(mtd->writesize >> FC_SHIFT);
	} else {
		cfg->spare_area_size = oob_sector;
	}
	if (cfg->spare_area_size > ctrl->max_oob)
		cfg->spare_area_size = ctrl->max_oob;
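	/*
	 * Set mtd oobsize to be consistent with the controller's
	 * spare_area_size; the rest of the OOB is inaccessible.
	 */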
	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);

	cfg->device_size = mtd->size;
	cfg->block_size = mtd->erasesize;
	cfg->page_size = mtd->writesize;
	cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
	cfg->col_adr_bytes = 2;
	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);

	if (chip->ecc.mode != NAND_ECC_HW) {
		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
			chip->ecc.mode);
		return -EINVAL;
	}

	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
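			/* Default to Hamming for 1-bit ECC, if unspecified */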
			chip->ecc.algo = NAND_ECC_HAMMING;
		else
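			/* Otherwise, BCH */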
			chip->ecc.algo = NAND_ECC_BCH;
	}

	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
						   chip->ecc.size != 512)) {
		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
			chip->ecc.strength, chip->ecc.size);
		return -EINVAL;
	}

	if (chip->ecc.mode != NAND_ECC_NONE &&
	    (!chip->ecc.size || !chip->ecc.strength)) {
		if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
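			/* use detected ECC parameters */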
			chip->ecc.size = chip->base.eccreq.step_size;
			chip->ecc.strength = chip->base.eccreq.strength;
			dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
				 chip->ecc.size, chip->ecc.strength);
		}
	}

	switch (chip->ecc.size) {
	case 512:
		if (chip->ecc.algo == NAND_ECC_HAMMING)
			cfg->ecc_level = 15;
		else
			cfg->ecc_level = chip->ecc.strength;
		cfg->sector_size_1k = 0;
		break;
	case 1024:
		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
			dev_err(ctrl->dev, "1KB sectors not supported\n");
			return -EINVAL;
		}
		if (chip->ecc.strength & 0x1) {
			dev_err(ctrl->dev,
				"odd ECC not supported with 1KB sectors\n");
			return -EINVAL;
		}
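		/*
		 * The hardware ECC level is expressed per 512 bytes, so with
		 * 1KiB sectors the strength is stored halved (print_cfg
		 * doubles it back when reporting).
		 */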
		cfg->ecc_level = chip->ecc.strength >> 1;
		cfg->sector_size_1k = 1;
		break;
	default:
		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
			chip->ecc.size);
		return -EINVAL;
	}

	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
	if (mtd->writesize > 512)
		cfg->ful_adr_bytes += cfg->col_adr_bytes;
	else
		cfg->ful_adr_bytes += 1;

	ret = brcmnand_set_cfg(host, cfg);
	if (ret)
		return ret;

	brcmnand_set_ecc_enabled(host, 1);

	brcmnand_print_cfg(host, msg, cfg);
	dev_info(ctrl->dev, "detected %s\n", msg);

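	/* Configure ACC_CONTROL */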
	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
	tmp = nand_readreg(ctrl, offs);
	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
	tmp &= ~ACC_CONTROL_RD_ERASED;

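	/* We need to turn on Read from erased pages protected by ECC */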
	if (ctrl->nand_version >= 0x0702)
		tmp |= ACC_CONTROL_RD_ERASED;
	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
	if (ctrl->features & BRCMNAND_HAS_PREFETCH)
		tmp &= ~ACC_CONTROL_PREFETCH;

	nand_writereg(ctrl, offs, tmp);

	return 0;
}

static int brcmnand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct brcmnand_host *host = nand_get_controller_data(chip);
	int ret;

	chip->options |= NAND_NO_SUBPAGE_WRITE;
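	/*
	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
	 * to/from, and have nand_base pass us a bounce buffer instead, as
	 * needed.
	 */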
	chip->options |= NAND_USE_BOUNCE_BUFFER;

	if (chip->bbt_options & NAND_BBT_USE_FLASH)
		chip->bbt_options |= NAND_BBT_NO_OOB;

	if (brcmnand_setup_dev(host))
		return -ENXIO;

	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;

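	/* only use our internal HW threshold */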
	mtd->bitflip_threshold = 1;

	ret = brcmstb_choose_ecc_layout(host);

	return ret;
}

static const struct nand_controller_ops brcmnand_controller_ops = {
	.attach_chip = brcmnand_attach_chip,
};

static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	struct platform_device *pdev = host->pdev;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int ret;
	u16 cfg_offs;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(&pdev->dev, "can't get chip-select\n");
		return -ENXIO;
	}

	mtd = nand_to_mtd(&host->chip);
	chip = &host->chip;

	nand_set_flash_node(chip, dn);
	nand_set_controller_data(chip, host);
	mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
				   host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
	chip->legacy.cmdfunc = brcmnand_cmdfunc;
	chip->legacy.waitfunc = brcmnand_waitfunc;
	chip->legacy.read_byte = brcmnand_read_byte;
	chip->legacy.read_buf = brcmnand_read_buf;
	chip->legacy.write_buf = brcmnand_write_buf;

	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.read_page = brcmnand_read_page;
	chip->ecc.write_page = brcmnand_write_page;
	chip->ecc.read_page_raw = brcmnand_read_page_raw;
	chip->ecc.write_page_raw = brcmnand_write_page_raw;
	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
	chip->ecc.read_oob = brcmnand_read_oob;
	chip->ecc.write_oob = brcmnand_write_oob;

	chip->controller = &ctrl->controller;

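	/*
	 * The bootloader might have configured 16bit mode but the NAND READID
	 * command only works in 8bit mode. We force 8bit mode here to ensure
	 * that the NAND READID command works.
	 */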
	cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	nand_writereg(ctrl, cfg_offs,
		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}

static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
					    int restore)
{
	struct brcmnand_controller *ctrl = host->ctrl;
	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
					      BRCMNAND_CS_CFG_EXT);
	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
						  BRCMNAND_CS_ACC_CONTROL);
	u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
	u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);

	if (restore) {
		nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
		if (cfg_offs != cfg_ext_offs)
			nand_writereg(ctrl, cfg_ext_offs,
				      host->hwcfg.config_ext);
		nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
		nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
		nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
	} else {
		host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
		if (cfg_offs != cfg_ext_offs)
			host->hwcfg.config_ext =
				nand_readreg(ctrl, cfg_ext_offs);
		host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
		host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
		host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
	}
}

static int brcmnand_suspend(struct device *dev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
	struct brcmnand_host *host;

	list_for_each_entry(host, &ctrl->host_list, node)
		brcmnand_save_restore_cs_config(host, 0);

	ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
	ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
	ctrl->corr_stat_threshold =
		brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);

	if (has_flash_dma(ctrl))
		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);

	return 0;
}

static int brcmnand_resume(struct device *dev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
	struct brcmnand_host *host;

	if (has_flash_dma(ctrl)) {
		flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
	}

	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
			   ctrl->corr_stat_threshold);
	if (ctrl->soc) {
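		/* Clear/re-enable interrupt */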
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	}

	list_for_each_entry(host, &ctrl->host_list, node) {
		struct nand_chip *chip = &host->chip;

		brcmnand_save_restore_cs_config(host, 1);

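		/* Reset the chip, required by some chips after power-up */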
		nand_reset_op(chip);
	}

	return 0;
}

const struct dev_pm_ops brcmnand_pm_ops = {
	.suspend = brcmnand_suspend,
	.resume = brcmnand_resume,
};
EXPORT_SYMBOL_GPL(brcmnand_pm_ops);

static const struct of_device_id brcmnand_of_match[] = {
	{ .compatible = "brcm,brcmnand-v4.0" },
	{ .compatible = "brcm,brcmnand-v5.0" },
	{ .compatible = "brcm,brcmnand-v6.0" },
	{ .compatible = "brcm,brcmnand-v6.1" },
	{ .compatible = "brcm,brcmnand-v6.2" },
	{ .compatible = "brcm,brcmnand-v7.0" },
	{ .compatible = "brcm,brcmnand-v7.1" },
	{ .compatible = "brcm,brcmnand-v7.2" },
	{ .compatible = "brcm,brcmnand-v7.3" },
	{},
};
MODULE_DEVICE_TABLE(of, brcmnand_of_match);

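/***********************************************************************
 * Platform driver setup (per controller)
 ***********************************************************************/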
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = dev->of_node, *child;
	struct brcmnand_controller *ctrl;
	struct resource *res;
	int ret;

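	/* We only support device-tree instantiation */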
	if (!dn)
		return -ENODEV;

	if (!of_match_node(brcmnand_of_match, dn))
		return -ENODEV;

	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	dev_set_drvdata(dev, ctrl);
	ctrl->dev = dev;

	init_completion(&ctrl->done);
	init_completion(&ctrl->dma_done);
	nand_controller_init(&ctrl->controller);
	ctrl->controller.ops = &brcmnand_controller_ops;
	INIT_LIST_HEAD(&ctrl->host_list);

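	/* NAND register range */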
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctrl->nand_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctrl->nand_base))
		return PTR_ERR(ctrl->nand_base);

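	/* Enable clock before using NAND registers */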
	ctrl->clk = devm_clk_get(dev, "nand");
	if (!IS_ERR(ctrl->clk)) {
		ret = clk_prepare_enable(ctrl->clk);
		if (ret)
			return ret;
	} else {
		ret = PTR_ERR(ctrl->clk);
		if (ret == -EPROBE_DEFER)
			return ret;

		ctrl->clk = NULL;
	}

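	/* Initialize NAND revision at runtime */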
	ret = brcmnand_revision_init(ctrl);
	if (ret)
		goto err;

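	/*
	 * Most chips have this flash cache at a fixed offset within the
	 * 'nand' block; some must specify this region separately.
	 */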
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
	if (res) {
		ctrl->nand_fc = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->nand_fc)) {
			ret = PTR_ERR(ctrl->nand_fc);
			goto err;
		}
	} else {
		ctrl->nand_fc = ctrl->nand_base +
				ctrl->reg_offsets[BRCMNAND_FC_BASE];
	}

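	/* FLASH_DMA */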
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
	if (res) {
		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(ctrl->flash_dma_base)) {
			ret = PTR_ERR(ctrl->flash_dma_base);
			goto err;
		}

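		/* initialize the dma version */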
		brcmnand_flash_dma_revision_init(ctrl);

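		/* linked-list and stop on error */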
		flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);

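		/* Allocate descriptor(s) */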
		ctrl->dma_desc = dmam_alloc_coherent(dev,
						     sizeof(*ctrl->dma_desc),
						     &ctrl->dma_pa, GFP_KERNEL);
		if (!ctrl->dma_desc) {
			ret = -ENOMEM;
			goto err;
		}

		ctrl->dma_irq = platform_get_irq(pdev, 1);
		if ((int)ctrl->dma_irq < 0) {
			dev_err(dev, "missing FLASH_DMA IRQ\n");
			ret = -ENODEV;
			goto err;
		}

		ret = devm_request_irq(dev, ctrl->dma_irq,
				       brcmnand_dma_irq, 0, DRV_NAME,
				       ctrl);
		if (ret < 0) {
			dev_err(dev, "can't allocate IRQ %d: error %d\n",
				ctrl->dma_irq, ret);
			goto err;
		}

		dev_info(dev, "enabling FLASH_DMA\n");
	}

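	/* Disable automatic device ID config, direct addressing */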
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
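	/* Disable XOR addressing */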
	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);

	if (ctrl->features & BRCMNAND_HAS_WP) {
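		/* Permanently remove write-protection */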
		if (wp_on == 2)
			brcmnand_set_wp(ctrl, false);
	} else {
		wp_on = 0;
	}

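	/* IRQ */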
	ctrl->irq = platform_get_irq(pdev, 0);
	if ((int)ctrl->irq < 0) {
		dev_err(dev, "no IRQ defined\n");
		ret = -ENODEV;
		goto err;
	}

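	/*
	 * Some SoCs integrate this controller (e.g., its interrupt bits) in
	 * interesting ways
	 */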
	if (soc) {
		ctrl->soc = soc;

		ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
				       DRV_NAME, ctrl);

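		/* Enable interrupt */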
		ctrl->soc->ctlrdy_ack(ctrl->soc);
		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
	} else {
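		/* Use standard interrupt infrastructure */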
		ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
				       DRV_NAME, ctrl);
	}
	if (ret < 0) {
		dev_err(dev, "can't allocate IRQ %d: error %d\n",
			ctrl->irq, ret);
		goto err;
	}

	for_each_available_child_of_node(dn, child) {
		if (of_device_is_compatible(child, "brcm,nandcs")) {
			struct brcmnand_host *host;

			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
			if (!host) {
				of_node_put(child);
				ret = -ENOMEM;
				goto err;
			}
			host->pdev = pdev;
			host->ctrl = ctrl;

			ret = brcmnand_init_cs(host, child);
			if (ret) {
				devm_kfree(dev, host);
				continue;
			}

			list_add_tail(&host->node, &ctrl->host_list);
		}
	}

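	/* No chip-selects could initialize properly */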
	if (list_empty(&ctrl->host_list)) {
		ret = -ENODEV;
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(ctrl->clk);
	return ret;
}
EXPORT_SYMBOL_GPL(brcmnand_probe);

int brcmnand_remove(struct platform_device *pdev)
{
	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
	struct brcmnand_host *host;

	list_for_each_entry(host, &ctrl->host_list, node)
		nand_release(&host->chip);

	clk_disable_unprepare(ctrl->clk);

	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(brcmnand_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kevin Cernekee");
MODULE_AUTHOR("Brian Norris");
MODULE_DESCRIPTION("NAND driver for Broadcom chips");
MODULE_ALIAS("platform:brcmnand");