/*
 * NAND flash driver for the Toshiba Mobile IO (TMIO) controller.
 *
 * Authors: Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>

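/* CCR: controller configuration register block (mapped at tmio->ccr) */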
#define CCR_COMMAND	0x04
#define CCR_BASE	0x10
#define CCR_INTP	0x3d
#define CCR_INTE	0x48
#define CCR_EC		0x4a
#define CCR_ICC		0x4c
#define CCR_ECCC	0x5b
#define CCR_NFTC	0x60
#define CCR_NFM		0x61
#define CCR_NFPSC	0x62
#define CCR_NFDC	0x63

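/* FCR: NAND flash control register block (mapped at tmio->fcr) */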
#define FCR_DATA	0x00
#define FCR_MODE	0x04
#define FCR_STATUS	0x05
#define FCR_ISR		0x06
#define FCR_IMR		0x07

/* FCR_MODE values: bus cycle types */
#define FCR_MODE_DATA		0x94
#define FCR_MODE_COMMAND	0x95
#define FCR_MODE_ADDRESS	0x96

#define FCR_MODE_HWECC_CALC	0xB4
#define FCR_MODE_HWECC_RESULT	0xD4
#define FCR_MODE_HWECC_RESET	0xF4

#define FCR_MODE_POWER_ON	0x0C
#define FCR_MODE_POWER_OFF	0x08

#define FCR_MODE_LED_OFF	0x00
#define FCR_MODE_LED_ON		0x04

#define FCR_MODE_EJECT_ON	0x68
#define FCR_MODE_EJECT_OFF	0x08

#define FCR_MODE_LOCK		0x6C
#define FCR_MODE_UNLOCK		0x0C

#define FCR_MODE_CONTROLLER_ID	0x40
#define FCR_MODE_STANDBY	0x00

/* FCR_MODE bit fields */
#define FCR_MODE_WE		0x80
#define FCR_MODE_ECC1		0x40
#define FCR_MODE_ECC0		0x20
#define FCR_MODE_CE		0x10
#define FCR_MODE_PCNT1		0x08
#define FCR_MODE_PCNT0		0x04
#define FCR_MODE_ALE		0x02
#define FCR_MODE_CLE		0x01

#define FCR_STATUS_BUSY		0x80

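/* Driver state for one TMIO NAND controller instance */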
struct tmio_nand {
	struct nand_controller controller;
	struct nand_chip chip;
	struct completion comp;

	struct platform_device *dev;

	void __iomem *ccr;
	void __iomem *fcr;
	unsigned long fcr_base;

	unsigned int irq;

	/* for tmio_nand_read_byte(): buffered high byte of a 16-bit read */
	u8 read;
	unsigned read_good:1;
};

static inline struct tmio_nand *mtd_to_tmio(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct tmio_nand, chip);
}

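/*
 * Legacy cmd_ctrl hook: latch the CLE/ALE/nCE state into the FCR mode
 * register when it changes, then write the command or address byte to the
 * data port.
 */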
static void tmio_nand_hwcontrol(struct nand_chip *chip, int cmd,
				unsigned int ctrl)
{
	struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));

	if (ctrl & NAND_CTRL_CHANGE) {
		u8 mode;

		if (ctrl & NAND_NCE) {
			mode = FCR_MODE_DATA;

			if (ctrl & NAND_CLE)
				mode |= FCR_MODE_CLE;
			else
				mode &= ~FCR_MODE_CLE;

			if (ctrl & NAND_ALE)
				mode |= FCR_MODE_ALE;
			else
				mode &= ~FCR_MODE_ALE;
		} else {
			mode = FCR_MODE_STANDBY;
		}

		tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
		tmio->read_good = 0;
	}

	if (cmd != NAND_CMD_NONE)
		tmio_iowrite8(cmd, chip->legacy.IO_ADDR_W);
}

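/* Ready/busy poll: the busy bit is clear when the chip is ready */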
static int tmio_nand_dev_ready(struct nand_chip *chip)
{
	struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));

	return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
}

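/* Interrupt handler: mask further interrupts and wake up tmio_nand_wait() */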
static irqreturn_t tmio_irq(int irq, void *__tmio)
{
	struct tmio_nand *tmio = __tmio;

	tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
	complete(&tmio->comp);

	return IRQ_HANDLED;
}

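/*
 * Wait for the NAND chip to finish the current operation: clear any stale
 * interrupt status, enable the controller's ready interrupt, and sleep on
 * the completion until either the IRQ fires or the 400 ms timeout expires.
 * On failure a warning is printed; the chip status is then read back with
 * nand_status_op() and returned.
 */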
static int tmio_nand_wait(struct nand_chip *nand_chip)
{
	struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(nand_chip));
	long timeout;
	u8 status;

	/* clear stale interrupt status, then enable the ready interrupt */
	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
	reinit_completion(&tmio->comp);
	tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);

	timeout = 400;
	timeout = wait_for_completion_timeout(&tmio->comp,
					      msecs_to_jiffies(timeout));

	if (unlikely(!tmio_nand_dev_ready(nand_chip))) {
		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
		dev_warn(&tmio->dev->dev, "still busy after 400 ms\n");

	} else if (unlikely(!timeout)) {
		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
		dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
	}

	nand_status_op(nand_chip, &status);
	return status;
}

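/*
 * The data port is 16 bits wide, so a single-byte read fetches a 16-bit
 * word and buffers the high byte for the next call. tmio_nand_hwcontrol()
 * clears read_good so a new command never returns a stale buffered byte.
 */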
static u_char tmio_nand_read_byte(struct nand_chip *chip)
{
	struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
	unsigned int data;

	if (tmio->read_good--)
		return tmio->read;

	data = tmio_ioread16(tmio->fcr + FCR_DATA);
	tmio->read = data >> 8;
	return data;
}

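/*
 * Bulk buffer transfers go through the 16-bit data port, moving len/2
 * words per call.
 */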
static void
tmio_nand_write_buf(struct nand_chip *chip, const u_char *buf, int len)
{
	struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));

	tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}

static void tmio_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
	struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));

	tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}

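/*
 * Hardware ECC: reset the engine, let it accumulate over the data phase,
 * then read back three 16-bit words holding the 6 ECC bytes per 512-byte
 * step (two 256-byte Hamming codes).
 */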
static void tmio_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
	struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));

	tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
	tmio_ioread8(tmio->fcr + FCR_DATA);	/* dummy read */
	tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
}

static int tmio_nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
				   u_char *ecc_code)
{
	struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
	unsigned int ecc;

	tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);

	/*
	 * Three 16-bit reads return the six ECC bytes; bytes 0-2 cover the
	 * first 256-byte half of the step, bytes 3-5 the second.
	 */
	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[1] = ecc;
	ecc_code[0] = ecc >> 8;
	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[2] = ecc;
	ecc_code[4] = ecc >> 8;
	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[3] = ecc;
	ecc_code[5] = ecc >> 8;

	tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
	return 0;
}

static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
				  unsigned char *read_ecc,
				  unsigned char *calc_ecc)
{
	int r0, r1;

	/* assume ecc.size = 512 and ecc.bytes = 6 */
	r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc);
	if (r0 < 0)
		return r0;
	r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3,
					calc_ecc + 3);
	if (r1 < 0)
		return r1;
	return r0 + r1;
}

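/*
 * Power up and configure the controller: enable the MFD cell, program the
 * internal clock and FCR base address, clear interrupt status, reset the
 * NAND chip and drop back into standby.
 */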
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);
	int ret;

	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			return ret;
	}

	/* internal clock control */
	tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);

	/* program the 20-bit FCR base address, low then high half */
	tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
	tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);

	tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);

	/* NAND power supply control */
	tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);

	/* NAND detect control */
	tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);

	/* clear any pending interrupt status */
	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);

	/* power the chip on and issue a NAND reset */
	tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
	tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
	tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);

	/* back to standby */
	tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);

	mdelay(5);

	return 0;
}

static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
	if (cell->disable)
		cell->disable(dev);
}

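/*
 * attach_chip hook: set up hardware ECC (6 ECC bytes per 512-byte step)
 * when on-host ECC is selected.
 */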
static int tmio_attach_chip(struct nand_chip *chip)
{
	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
		return 0;

	chip->ecc.size = 512;
	chip->ecc.bytes = 6;
	chip->ecc.strength = 2;
	chip->ecc.hwctl = tmio_nand_enable_hwecc;
	chip->ecc.calculate = tmio_nand_calculate_ecc;
	chip->ecc.correct = tmio_nand_correct_data;

	return 0;
}

static const struct nand_controller_ops tmio_ops = {
	.attach_chip = tmio_attach_chip,
};

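/*
 * Probe: map the FCR and CCR windows of the MFD sub-device, bring the
 * controller up, wire the legacy NAND callbacks, then scan for the chip
 * and register the MTD device.
 */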
static int tmio_probe(struct platform_device *dev)
{
	struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
	struct resource *fcr = platform_get_resource(dev,
						     IORESOURCE_MEM, 0);
	struct resource *ccr = platform_get_resource(dev,
						     IORESOURCE_MEM, 1);
	int irq = platform_get_irq(dev, 0);
	struct tmio_nand *tmio;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	int retval;

	if (data == NULL)
		dev_warn(&dev->dev, "NULL platform data!\n");

	tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
	if (!tmio)
		return -ENOMEM;

	init_completion(&tmio->comp);

	tmio->dev = dev;

	platform_set_drvdata(dev, tmio);
	nand_chip = &tmio->chip;
	mtd = nand_to_mtd(nand_chip);
	mtd->name = "tmio-nand";
	mtd->dev.parent = &dev->dev;

	nand_controller_init(&tmio->controller);
	tmio->controller.ops = &tmio_ops;
	nand_chip->controller = &tmio->controller;

	tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
	if (!tmio->ccr)
		return -EIO;

	tmio->fcr_base = fcr->start & 0xfffff;
	tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
	if (!tmio->fcr)
		return -EIO;

	retval = tmio_hw_init(dev, tmio);
	if (retval)
		return retval;

	/* NAND I/O: both directions go through the FCR data port */
	nand_chip->legacy.IO_ADDR_R = tmio->fcr;
	nand_chip->legacy.IO_ADDR_W = tmio->fcr;

	/* legacy hardware control callbacks */
	nand_chip->legacy.cmd_ctrl = tmio_nand_hwcontrol;
	nand_chip->legacy.dev_ready = tmio_nand_dev_ready;
	nand_chip->legacy.read_byte = tmio_nand_read_byte;
	nand_chip->legacy.write_buf = tmio_nand_write_buf;
	nand_chip->legacy.read_buf = tmio_nand_read_buf;

	if (data)
		nand_chip->badblock_pattern = data->badblock_pattern;

	/* 15 us command delay */
	nand_chip->legacy.chip_delay = 15;

	retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
				  dev_name(&dev->dev), tmio);
	if (retval) {
		dev_err(&dev->dev, "request_irq error %d\n", retval);
		goto err_irq;
	}

	tmio->irq = irq;
	nand_chip->legacy.waitfunc = tmio_nand_wait;

	/* scan for the NAND device */
	retval = nand_scan(nand_chip, 1);
	if (retval)
		goto err_irq;

	/* register the MTD device and its partitions */
	retval = mtd_device_parse_register(mtd,
					   data ? data->part_parsers : NULL,
					   NULL,
					   data ? data->partition : NULL,
					   data ? data->num_partitions : 0);
	if (!retval)
		return retval;

	nand_cleanup(nand_chip);

err_irq:
	tmio_hw_stop(dev, tmio);
	return retval;
}

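/* Unregister the MTD device, release the NAND chip and power the controller down */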
static int tmio_remove(struct platform_device *dev)
{
	struct tmio_nand *tmio = platform_get_drvdata(dev);
	struct nand_chip *chip = &tmio->chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	tmio_hw_stop(dev, tmio);
	return 0;
}

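/*
 * Legacy platform PM hooks: power the controller down on suspend and
 * re-run the full hardware init on resume.
 */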
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	if (cell->suspend)
		cell->suspend(dev);

	tmio_hw_stop(dev, platform_get_drvdata(dev));
	return 0;
}

static int tmio_resume(struct platform_device *dev)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	/* re-initialise the controller before resuming the MFD cell */
	tmio_hw_init(dev, platform_get_drvdata(dev));

	if (cell->resume)
		cell->resume(dev);

	return 0;
}
#else
#define tmio_suspend NULL
#define tmio_resume NULL
#endif

static struct platform_driver tmio_driver = {
	.driver.name	= "tmio-nand",
	.driver.owner	= THIS_MODULE,
	.probe		= tmio_probe,
	.remove		= tmio_remove,
	.suspend	= tmio_suspend,
	.resume		= tmio_resume,
};

module_platform_driver(tmio_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
MODULE_ALIAS("platform:tmio-nand");