1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/device.h>
25#include <linux/module.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/onenand.h>
28#include <linux/mtd/partitions.h>
29#include <linux/of_device.h>
30#include <linux/omap-gpmc.h>
31#include <linux/platform_device.h>
32#include <linux/interrupt.h>
33#include <linux/delay.h>
34#include <linux/dma-mapping.h>
35#include <linux/dmaengine.h>
36#include <linux/io.h>
37#include <linux/slab.h>
38#include <linux/gpio/consumer.h>
39
40#include <asm/mach/flash.h>
41
#define DRIVER_NAME "omap2-onenand"

/* Size of the BufferRAM window that gets zeroed in omap2_onenand_shutdown(). */
#define ONENAND_BUFRAM_SIZE (1024 * 5)
45
/* Per-device state for one OneNAND chip behind an OMAP2/3 GPMC chip-select. */
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;			/* GPMC chip-select number (DT "reg") */
	unsigned long phys_base;	/* physical base of the GPMC memory window */
	struct gpio_desc *int_gpiod;	/* optional INT pin; enables IRQ-driven waits */
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;	/* completed by the INT GPIO interrupt handler */
	struct completion dma_done;	/* completed by the DMA engine callback */
	struct dma_chan *dma_chan;	/* memcpy channel; NULL means PIO-only transfers */
};
57
/* DMA engine callback: wake the waiter in omap2_onenand_dma_transfer(). */
static void omap2_onenand_dma_complete_func(void *completion)
{
	complete((struct completion *)completion);
}
62
63static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
64{
65 struct omap2_onenand *c = dev_id;
66
67 complete(&c->irq_done);
68
69 return IRQ_HANDLED;
70}
71
72static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
73{
74 return readw(c->onenand.base + reg);
75}
76
77static inline void write_reg(struct omap2_onenand *c, unsigned short value,
78 int reg)
79{
80 writew(value, c->onenand.base + reg);
81}
82
83static int omap2_onenand_set_cfg(struct omap2_onenand *c,
84 bool sr, bool sw,
85 int latency, int burst_len)
86{
87 unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
88
89 reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
90
91 switch (burst_len) {
92 case 0:
93 break;
94 case 4:
95 reg |= ONENAND_SYS_CFG1_BL_4;
96 break;
97 case 8:
98 reg |= ONENAND_SYS_CFG1_BL_8;
99 break;
100 case 16:
101 reg |= ONENAND_SYS_CFG1_BL_16;
102 break;
103 case 32:
104 reg |= ONENAND_SYS_CFG1_BL_32;
105 break;
106 default:
107 return -EINVAL;
108 }
109
110 if (latency > 5)
111 reg |= ONENAND_SYS_CFG1_HF;
112 if (latency > 7)
113 reg |= ONENAND_SYS_CFG1_VHF;
114 if (sr)
115 reg |= ONENAND_SYS_CFG1_SYNC_READ;
116 if (sw)
117 reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
118
119 write_reg(c, reg, ONENAND_REG_SYS_CFG1);
120
121 return 0;
122}
123
/*
 * Map the speed-grade field (bits 7..4) of the OneNAND version ID to the
 * device clock frequency in MHz.  Unknown grades yield -EINVAL.
 */
static int omap2_onenand_get_freq(int ver)
{
	static const int freq_table[] = { 40, 54, 66, 83, 104 };
	int grade = (ver >> 4) & 0xf;

	if (grade < (int)(sizeof(freq_table) / sizeof(freq_table[0])))
		return freq_table[grade];

	return -EINVAL;
}
141
/* Log a fatal wait-state error with the raw controller/interrupt registers. */
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	/* pr_err() is preferred over printk(KERN_ERR ...) in current kernel style. */
	pr_err("onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}
147
/* Log a non-fatal wait-state anomaly with the raw controller/interrupt registers. */
static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	/* pr_warn() is preferred over printk(KERN_WARNING ...) in current kernel style. */
	pr_warn("onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
		msg, state, ctrl, intr);
}
154
/*
 * Wait for the OneNAND controller to finish the current operation.
 *
 * Short operations (reset, erase prepare/verify) are busy-polled with udelay.
 * Reads are polled with a 20 ms timeout; everything else sleeps on the INT
 * GPIO interrupt (this function is only installed as ->wait when the INT
 * GPIO exists, see probe).  Returns 0 on success, -EIO on controller error
 * or timeout, -EBADMSG on an uncorrectable ECC error.
 */
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		/* ~20 us poll budget; erase-verify gets ~100 us. */
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Not done within the poll budget: fall through to the
		 * interrupt-driven wait below. */
	}

	if (state != FL_READING) {
		int result;

		/* Turn the interrupt output on if it isn't already. */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			/* Dummy read-back — presumably to let the INT line
			 * settle before sampling the GPIO; confirm. */
			syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		result = gpiod_get_value(c->int_gpiod);
		if (result < 0) {
			ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			wait_err("gpio error", state, ctrl, intr);
			return result;
		} else if (result == 0) {
			/* INT still low: the operation is in flight, sleep
			 * until the rising-edge IRQ completes irq_done. */
			int retry_cnt = 0;
retry:
			if (!wait_for_completion_io_timeout(&c->irq_done,
						msecs_to_jiffies(20))) {
				/* Timeout after 20 ms. */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation still seems to be in
					 * progress, so give it up to two more
					 * 20 ms windows before declaring
					 * failure.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Reads are polled: turn the interrupt output off. */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20 ms. */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * Still in progress: extend the poll
					 * window, up to two more times.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		/* A read completed: check and account ECC results. */
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				/* Uncorrectable: report bad message. */
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				/* Single-bit errors were corrected by HW. */
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		/* Expected a read-complete interrupt but never saw one. */
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	/* Warn about any unexpected leftover status bits; the ONGO bit
	 * (0x8000) is tolerated while a multi-step operation is ongoing. */
	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
322
323static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
324{
325 struct onenand_chip *this = mtd->priv;
326
327 if (ONENAND_CURRENT_BUFFERRAM(this)) {
328 if (area == ONENAND_DATARAM)
329 return this->writesize;
330 if (area == ONENAND_SPARERAM)
331 return mtd->oobsize;
332 }
333
334 return 0;
335}
336
337static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
338 dma_addr_t src, dma_addr_t dst,
339 size_t count)
340{
341 struct dma_async_tx_descriptor *tx;
342 dma_cookie_t cookie;
343
344 tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
345 if (!tx) {
346 dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
347 return -EIO;
348 }
349
350 reinit_completion(&c->dma_done);
351
352 tx->callback = omap2_onenand_dma_complete_func;
353 tx->callback_param = &c->dma_done;
354
355 cookie = tx->tx_submit(tx);
356 if (dma_submit_error(cookie)) {
357 dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
358 return -EIO;
359 }
360
361 dma_async_issue_pending(c->dma_chan);
362
363 if (!wait_for_completion_io_timeout(&c->dma_done,
364 msecs_to_jiffies(20))) {
365 dmaengine_terminate_sync(c->dma_chan);
366 return -ETIMEDOUT;
367 }
368
369 return 0;
370}
371
372static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
373 unsigned char *buffer, int offset,
374 size_t count)
375{
376 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
377 struct onenand_chip *this = mtd->priv;
378 struct device *dev = &c->pdev->dev;
379 void *buf = (void *)buffer;
380 dma_addr_t dma_src, dma_dst;
381 int bram_offset, err;
382 size_t xtra;
383
384 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
385
386
387
388
389
390 if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
391 count < 384 || in_interrupt() || oops_in_progress )
392 goto out_copy;
393
394 xtra = count & 3;
395 if (xtra) {
396 count -= xtra;
397 memcpy(buf + count, this->base + bram_offset + count, xtra);
398 }
399
400 dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
401 dma_src = c->phys_base + bram_offset;
402
403 if (dma_mapping_error(dev, dma_dst)) {
404 dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
405 goto out_copy;
406 }
407
408 err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
409 dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
410 if (!err)
411 return 0;
412
413 dev_err(dev, "timeout waiting for DMA\n");
414
415out_copy:
416 memcpy(buf, this->base + bram_offset, count);
417 return 0;
418}
419
420static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
421 const unsigned char *buffer,
422 int offset, size_t count)
423{
424 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
425 struct onenand_chip *this = mtd->priv;
426 struct device *dev = &c->pdev->dev;
427 void *buf = (void *)buffer;
428 dma_addr_t dma_src, dma_dst;
429 int bram_offset, err;
430
431 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
432
433
434
435
436
437 if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
438 count < 384 || in_interrupt() || oops_in_progress )
439 goto out_copy;
440
441 dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
442 dma_dst = c->phys_base + bram_offset;
443 if (dma_mapping_error(dev, dma_src)) {
444 dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
445 goto out_copy;
446 }
447
448 err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
449 dma_unmap_page(dev, dma_src, count, DMA_TO_DEVICE);
450 if (!err)
451 return 0;
452
453 dev_err(dev, "timeout waiting for DMA\n");
454
455out_copy:
456 memcpy(this->base + bram_offset, buf, count);
457 return 0;
458}
459
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/*
	 * Zero the mapped BufferRAM window before going down.
	 * NOTE(review): presumably certain leftover BufferRAM content can
	 * confuse the OMAP boot ROM's flash detection on the next (soft)
	 * reset — confirm against the board/boot-ROM documentation.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
470
/*
 * Probe: map the GPMC window, optionally wire up the INT GPIO (IRQ-driven
 * waits) and a memcpy DMA channel (DMA BufferRAM transfers), scan the chip,
 * tune GPMC/OneNAND timings for the detected speed grade, and register the
 * MTD device.  Ordering matters: resources acquired before onenand_scan()
 * are devm-managed; onenand_scan() and the DMA channel need explicit
 * unwinding on failure.
 */
static int omap2_onenand_probe(struct platform_device *pdev)
{
	u32 val;
	dma_cap_mask_t mask;
	int freq, latency, r;
	struct resource *res;
	struct omap2_onenand *c;
	struct gpmc_onenand_info info;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "error getting memory resource\n");
		return -EINVAL;
	}

	/* The DT "reg" property is the GPMC chip-select number. */
	r = of_property_read_u32(np, "reg", &val);
	if (r) {
		dev_err(dev, "reg not found in DT\n");
		return r;
	}

	c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = val;
	c->phys_base = res->start;

	c->onenand.base = devm_ioremap_resource(dev, res);
	if (IS_ERR(c->onenand.base))
		return PTR_ERR(c->onenand.base);

	/* The INT pin is optional; without it the core's default wait is used. */
	c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
	if (IS_ERR(c->int_gpiod)) {
		r = PTR_ERR(c->int_gpiod);
		/* Stay silent on probe deferral; it is not an error. */
		if (r != -EPROBE_DEFER)
			dev_err(dev, "error getting gpio: %d\n", r);
		return r;
	}

	if (c->int_gpiod) {
		/* Rising edge on INT means "operation complete". */
		r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING, "onenand", c);
		if (r)
			return r;

		c->onenand.wait = omap2_onenand_wait;
	}

	/* Any memcpy-capable DMA channel will do; DMA is optional (PIO fallback). */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	c->dma_chan = dma_request_channel(mask, NULL, NULL);
	if (c->dma_chan) {
		c->onenand.read_bufferram = omap2_onenand_read_bufferram;
		c->onenand.write_bufferram = omap2_onenand_write_bufferram;
	}

	c->pdev = pdev;
	c->mtd.priv = &c->onenand;
	c->mtd.dev.parent = dev;
	mtd_set_of_node(&c->mtd, dev->of_node);

	dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
		 c->gpmc_cs, c->phys_base, c->onenand.base,
		 c->dma_chan ? "DMA" : "PIO");

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	/* Tune timings to the speed grade encoded in the chip's version ID. */
	freq = omap2_onenand_get_freq(c->onenand.version_id);
	if (freq > 0) {
		/*
		 * Burst read latency per frequency.
		 * NOTE(review): omap2_onenand_get_freq() returns 54, never
		 * 56, so "case 56" appears unreachable and 54 MHz parts fall
		 * through to latency 3 — confirm whether this is intended.
		 */
		switch (freq) {
		case 104:
			latency = 7;
			break;
		case 83:
			latency = 6;
			break;
		case 66:
			latency = 5;
			break;
		case 56:
			latency = 4;
			break;
		default:
			latency = 3;
			break;
		}

		r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
						  freq, latency, &info);
		if (r)
			goto err_release_onenand;

		r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
					  latency, info.burst_len);
		if (r)
			goto err_release_onenand;

		if (info.sync_read || info.sync_write)
			dev_info(dev, "optimized timings for %d MHz\n", freq);
	}

	r = mtd_device_register(&c->mtd, NULL, 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

	/* Unwind in reverse order of acquisition. */
err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);

	return r;
}
597
598static int omap2_onenand_remove(struct platform_device *pdev)
599{
600 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
601
602 onenand_release(&c->mtd);
603 if (c->dma_chan)
604 dma_release_channel(c->dma_chan);
605 omap2_onenand_shutdown(pdev);
606
607 return 0;
608}
609
/* DT match table: binds this driver to "ti,omap2-onenand" nodes. */
static const struct of_device_id omap2_onenand_id_table[] = {
	{ .compatible = "ti,omap2-onenand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
615
static struct platform_driver omap2_onenand_driver = {
	.probe = omap2_onenand_probe,
	.remove = omap2_onenand_remove,
	/* BufferRAM is also wiped on shutdown/reboot, not only on remove. */
	.shutdown = omap2_onenand_shutdown,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = omap2_onenand_id_table,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
632