/*
 * NAND flash simulator.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
    !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
    !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
    !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF
#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF
#endif

#ifndef CONFIG_NANDSIM_ACCESS_DELAY
#define CONFIG_NANDSIM_ACCESS_DELAY 25
#endif
#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
#endif
#ifndef CONFIG_NANDSIM_ERASE_DELAY
#define CONFIG_NANDSIM_ERASE_DELAY 2
#endif
#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
#endif
#ifndef CONFIG_NANDSIM_INPUT_CYCLE
#define CONFIG_NANDSIM_INPUT_CYCLE 50
#endif
#ifndef CONFIG_NANDSIM_BUS_WIDTH
#define CONFIG_NANDSIM_BUS_WIDTH 8
#endif
#ifndef CONFIG_NANDSIM_DO_DELAYS
#define CONFIG_NANDSIM_DO_DELAYS 0
#endif
#ifndef CONFIG_NANDSIM_LOG
#define CONFIG_NANDSIM_LOG 0
#endif
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
#ifndef CONFIG_NANDSIM_MAX_PARTS
#define CONFIG_NANDSIM_MAX_PARTS 32
#endif

static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
static uint log = CONFIG_NANDSIM_LOG;
static uint dbg = CONFIG_NANDSIM_DBG;
static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
static char *weakpages = NULL;
static unsigned int bitflips = 0;
static char *gravepages = NULL;
static unsigned int overridesize = 0;
static char *cache_file = NULL;
static unsigned int bbt;
static unsigned int bch;
static u_char id_bytes[8] = {
	[0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
	[1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
	[2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
	[3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
	[4 ... 7] = 0xFF,
};

module_param_array(id_bytes, byte, NULL, 0400);
module_param_named(first_id_byte, id_bytes[0], byte, 0400);
module_param_named(second_id_byte, id_bytes[1], byte, 0400);
module_param_named(third_id_byte, id_bytes[2], byte, 0400);
module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
module_param(access_delay, uint, 0400);
module_param(programm_delay, uint, 0400);
module_param(erase_delay, uint, 0400);
module_param(output_cycle, uint, 0400);
module_param(input_cycle, uint, 0400);
module_param(bus_width, uint, 0400);
module_param(do_delays, uint, 0400);
module_param(log, uint, 0400);
module_param(dbg, uint, 0400);
module_param_array(parts, ulong, &parts_num, 0400);
module_param(badblocks, charp, 0400);
module_param(weakblocks, charp, 0400);
module_param(weakpages, charp, 0400);
module_param(bitflips, uint, 0400);
module_param(gravepages, charp, 0400);
module_param(overridesize, uint, 0400);
module_param(cache_file, charp, 0400);
module_param(bbt, uint, 0400);
module_param(bch, uint, 0400);

MODULE_PARM_DESC(id_bytes, "The ID bytes returned by NAND Flash 'read ID' command");
MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command (obsolete)");
MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
MODULE_PARM_DESC(log, "Perform logging if not zero");
MODULE_PARM_DESC(dbg, "Output debug information if not zero");
MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");

MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
		 " separated by commas e.g. 113:2 means eb 113"
		 " can be erased only twice before failing");
MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
		 " separated by commas e.g. 1401:2 means page 1401"
		 " can be written only twice before failing");
MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
		 " separated by commas e.g. 1401:2 means page 1401"
		 " can be read only twice before failing");
MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
		 "The size is specified in erase blocks and as the exponent of a power of two"
		 " e.g. 5 means a size of 32 erase blocks");
MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
		 "be correctable in 512-byte blocks");

#define NS_LARGEST_PAGE_SIZE 4096

#define NS_OUTPUT_PREFIX "[nandsim]"

#define NS_LOG(args...) \
	do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
#define NS_DBG(args...) \
	do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
#define NS_WARN(args...) \
	do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
#define NS_ERR(args...) \
	do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
#define NS_INFO(args...) \
	do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)

#define NS_UDELAY(us) \
	do { if (do_delays) udelay(us); } while(0)
#define NS_MDELAY(us) \
	do { if (do_delays) mdelay(us); } while(0)

#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)

#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))

#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))

#define NS_RAW_OFFSET(ns) \
	(((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)

#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)

#define STATE_CMD_READ0 0x00000001
#define STATE_CMD_READ1 0x00000002
#define STATE_CMD_READSTART 0x00000003
#define STATE_CMD_PAGEPROG 0x00000004
#define STATE_CMD_READOOB 0x00000005
#define STATE_CMD_ERASE1 0x00000006
#define STATE_CMD_STATUS 0x00000007
#define STATE_CMD_SEQIN 0x00000009
#define STATE_CMD_READID 0x0000000A
#define STATE_CMD_ERASE2 0x0000000B
#define STATE_CMD_RESET 0x0000000C
#define STATE_CMD_RNDOUT 0x0000000D
#define STATE_CMD_RNDOUTSTART 0x0000000E
#define STATE_CMD_MASK 0x0000000F

#define STATE_ADDR_PAGE 0x00000010
#define STATE_ADDR_SEC 0x00000020
#define STATE_ADDR_COLUMN 0x00000030
#define STATE_ADDR_ZERO 0x00000040
#define STATE_ADDR_MASK 0x00000070

#define STATE_DATAIN 0x00000100
#define STATE_DATAIN_MASK 0x00000100

#define STATE_DATAOUT 0x00001000
#define STATE_DATAOUT_ID 0x00002000
#define STATE_DATAOUT_STATUS 0x00003000
#define STATE_DATAOUT_MASK 0x00007000

#define STATE_READY 0x00000000

#define STATE_UNKNOWN 0x10000000

#define ACTION_CPY 0x00100000
#define ACTION_PRGPAGE 0x00200000
#define ACTION_SECERASE 0x00300000
#define ACTION_ZEROOFF 0x00400000
#define ACTION_HALFOFF 0x00500000
#define ACTION_OOBOFF 0x00600000
#define ACTION_MASK 0x00700000

#define NS_OPER_NUM 13
#define NS_OPER_STATES 6

#define OPT_ANY 0xFFFFFFFF
#define OPT_PAGE512 0x00000002
#define OPT_PAGE2048 0x00000008
#define OPT_PAGE512_8BIT 0x00000040
#define OPT_PAGE4096 0x00000080
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096)
#define OPT_SMALLPAGE (OPT_PAGE512)

#define NS_STATE(x) ((x) & ~ACTION_MASK)

#define NS_MAX_PREVSTATES 1

#define NS_MAX_HELD_PAGES 16

struct nandsim_debug_info {
	struct dentry *dfs_root;
	struct dentry *dfs_wear_report;
};

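/*
 * A union to represent flash memory contents and flash buffer,
 * accessed either byte-wise (8-bit bus) or word-wise (16-bit bus).
 */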
union ns_mem {
	u_char *byte;
	uint16_t *word;
};

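/*
 * The structure which describes all the internal simulator data:
 * partition layout, chip geometry, current state machine state,
 * internal registers and the optional cache-file backing store.
 */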
struct nandsim {
	struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
	unsigned int nbparts;

	uint busw;
	u_char ids[8];
	uint32_t options;
	uint32_t state;
	uint32_t nxstate;

	uint32_t *op;
	uint32_t pstates[NS_MAX_PREVSTATES];
	uint16_t npstates;
	uint16_t stateidx;

	union ns_mem *pages;

	struct kmem_cache *nand_pages_slab;

	union ns_mem buf;

	struct {
		uint64_t totsz;
		uint32_t secsz;
		uint pgsz;
		uint oobsz;
		uint64_t totszoob;
		uint pgszoob;
		uint secszoob;
		uint pgnum;
		uint pgsec;
		uint secshift;
		uint pgshift;
		uint pgaddrbytes;
		uint secaddrbytes;
		uint idbytes;
	} geom;

	struct {
		unsigned command;
		u_char status;
		uint row;
		uint column;
		uint count;
		uint num;
		uint off;
	} regs;

	struct {
		int ce;
		int cle;
		int ale;
		int wp;
	} lines;

	struct file *cfile;
	unsigned long *pages_written;
	void *file_buf;
	struct page *held_pages[NS_MAX_HELD_PAGES];
	int held_cnt;

	struct nandsim_debug_info dbg;
};

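/*
 * Operation lookup table: each entry lists the chip options required
 * for the operation to apply, followed by the sequence of states the
 * simulator walks through while performing it.
 */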
static struct nandsim_operations {
	uint32_t reqopts;
	uint32_t states[NS_OPER_STATES];
} ops[NS_OPER_NUM] = {
	{OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
	{OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
	{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
	{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
			STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
	{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
			STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
	{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
			STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
	{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
			STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
	{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
	{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
	{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
	{OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
	{OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
			STATE_DATAOUT, STATE_READY}},
};

struct weak_block {
	struct list_head list;
	unsigned int erase_block_no;
	unsigned int max_erases;
	unsigned int erases_done;
};

static LIST_HEAD(weak_blocks);

struct weak_page {
	struct list_head list;
	unsigned int page_no;
	unsigned int max_writes;
	unsigned int writes_done;
};

static LIST_HEAD(weak_pages);

struct grave_page {
	struct list_head list;
	unsigned int page_no;
	unsigned int max_reads;
	unsigned int reads_done;
};

static LIST_HEAD(grave_pages);

static unsigned long *erase_block_wear = NULL;
static unsigned int wear_eb_count = 0;
static unsigned long total_wear = 0;

static struct mtd_info *nsmtd;

static int nandsim_debugfs_show(struct seq_file *m, void *private)
{
	unsigned long wmin = -1, wmax = 0, avg;
	unsigned long deciles[10], decile_max[10], tot = 0;
	unsigned int i;

	for (i = 0; i < wear_eb_count; ++i) {
		unsigned long wear = erase_block_wear[i];
		if (wear < wmin)
			wmin = wear;
		if (wear > wmax)
			wmax = wear;
		tot += wear;
	}

	for (i = 0; i < 9; ++i) {
		deciles[i] = 0;
		decile_max[i] = (wmax * (i + 1) + 5) / 10;
	}
	deciles[9] = 0;
	decile_max[9] = wmax;
	for (i = 0; i < wear_eb_count; ++i) {
		int d;
		unsigned long wear = erase_block_wear[i];
		for (d = 0; d < 10; ++d)
			if (wear <= decile_max[d]) {
				deciles[d] += 1;
				break;
			}
	}
	avg = tot / wear_eb_count;

	seq_printf(m, "Total number of erases: %lu\n", tot);
	seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
	seq_printf(m, "Average number of erases: %lu\n", avg);
	seq_printf(m, "Maximum number of erases: %lu\n", wmax);
	seq_printf(m, "Minimum number of erases: %lu\n", wmin);
	for (i = 0; i < 10; ++i) {
		unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
		if (from > decile_max[i])
			continue;
		seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
			from,
			decile_max[i],
			deciles[i]);
	}

	return 0;
}

static int nandsim_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, nandsim_debugfs_show, inode->i_private);
}

static const struct file_operations dfs_fops = {
	.open = nandsim_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nandsim_debugfs_create(struct nandsim *dev)
{
	struct nandsim_debug_info *dbg = &dev->dbg;
	struct dentry *dent;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;

	dent = debugfs_create_dir("nandsim", NULL);
	if (!dent) {
		NS_ERR("cannot create \"nandsim\" debugfs directory\n");
		return -ENODEV;
	}
	dbg->dfs_root = dent;

	dent = debugfs_create_file("wear_report", S_IRUSR,
				   dbg->dfs_root, dev, &dfs_fops);
	if (!dent)
		goto out_remove;
	dbg->dfs_wear_report = dent;

	return 0;

out_remove:
	debugfs_remove_recursive(dbg->dfs_root);
	return -ENODEV;
}

static void nandsim_debugfs_remove(struct nandsim *ns)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		debugfs_remove_recursive(ns->dbg.dfs_root);
}

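/*
 * Allocate the simulated flash: either an array of page pointers backed
 * by a kmem_cache (pages are allocated lazily on first program), or a
 * cache file plus a bitmap of written pages when the 'cache_file'
 * module parameter is given.
 */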
static int __init alloc_device(struct nandsim *ns)
{
	struct file *cfile;
	int i, err;

	if (cache_file) {
		cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
		if (IS_ERR(cfile))
			return PTR_ERR(cfile);
		if (!(cfile->f_mode & FMODE_CAN_READ)) {
			NS_ERR("alloc_device: cache file not readable\n");
			err = -EINVAL;
			goto err_close;
		}
		if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
			NS_ERR("alloc_device: cache file not writeable\n");
			err = -EINVAL;
			goto err_close;
		}
		ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) *
					    sizeof(unsigned long));
		if (!ns->pages_written) {
			NS_ERR("alloc_device: unable to allocate pages written array\n");
			err = -ENOMEM;
			goto err_close;
		}
		ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
		if (!ns->file_buf) {
			NS_ERR("alloc_device: unable to allocate file buf\n");
			err = -ENOMEM;
			goto err_free;
		}
		ns->cfile = cfile;
		return 0;
	}

	ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
	if (!ns->pages) {
		NS_ERR("alloc_device: unable to allocate page array\n");
		return -ENOMEM;
	}
	for (i = 0; i < ns->geom.pgnum; i++) {
		ns->pages[i].byte = NULL;
	}
	ns->nand_pages_slab = kmem_cache_create("nandsim",
						ns->geom.pgszoob, 0, 0, NULL);
	if (!ns->nand_pages_slab) {
		NS_ERR("cache_create: unable to create kmem_cache\n");
		return -ENOMEM;
	}

	return 0;

err_free:
	vfree(ns->pages_written);
err_close:
	filp_close(cfile, NULL);
	return err;
}

static void free_device(struct nandsim *ns)
{
	int i;

	if (ns->cfile) {
		kfree(ns->file_buf);
		vfree(ns->pages_written);
		filp_close(ns->cfile, NULL);
		return;
	}

	if (ns->pages) {
		for (i = 0; i < ns->geom.pgnum; i++) {
			if (ns->pages[i].byte)
				kmem_cache_free(ns->nand_pages_slab,
						ns->pages[i].byte);
		}
		kmem_cache_destroy(ns->nand_pages_slab);
		vfree(ns->pages);
	}
}

static char __init *get_partition_name(int i)
{
	return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
}

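/*
 * Initialize the nandsim structure from the MTD geometry detected by
 * nand_scan_ident(), build the partition table from the 'parts' module
 * parameter and allocate the internal page buffer.
 *
 * RETURNS: 0 on success, a negative error code otherwise.
 */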
static int __init init_nandsim(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nandsim *ns = nand_get_controller_data(chip);
	int i, ret = 0;
	uint64_t remains;
	uint64_t next_offset;

	if (NS_IS_INITIALIZED(ns)) {
		NS_ERR("init_nandsim: nandsim is already initialized\n");
		return -EIO;
	}

	chip->chip_delay = 0;

	ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
	ns->geom.totsz = mtd->size;
	ns->geom.pgsz = mtd->writesize;
	ns->geom.oobsz = mtd->oobsize;
	ns->geom.secsz = mtd->erasesize;
	ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
	ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
	ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
	ns->geom.secshift = ffs(ns->geom.secsz) - 1;
	ns->geom.pgshift = chip->page_shift;
	ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
	ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
	ns->options = 0;

	if (ns->geom.pgsz == 512) {
		ns->options |= OPT_PAGE512;
		if (ns->busw == 8)
			ns->options |= OPT_PAGE512_8BIT;
	} else if (ns->geom.pgsz == 2048) {
		ns->options |= OPT_PAGE2048;
	} else if (ns->geom.pgsz == 4096) {
		ns->options |= OPT_PAGE4096;
	} else {
		NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
		return -EIO;
	}

	if (ns->options & OPT_SMALLPAGE) {
		if (ns->geom.totsz <= (32 << 20)) {
			ns->geom.pgaddrbytes = 3;
			ns->geom.secaddrbytes = 2;
		} else {
			ns->geom.pgaddrbytes = 4;
			ns->geom.secaddrbytes = 3;
		}
	} else {
		if (ns->geom.totsz <= (128 << 20)) {
			ns->geom.pgaddrbytes = 4;
			ns->geom.secaddrbytes = 2;
		} else {
			ns->geom.pgaddrbytes = 5;
			ns->geom.secaddrbytes = 3;
		}
	}

	if (parts_num > ARRAY_SIZE(ns->partitions)) {
		NS_ERR("too many partitions.\n");
		return -EINVAL;
	}
	remains = ns->geom.totsz;
	next_offset = 0;
	for (i = 0; i < parts_num; ++i) {
		uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;

		if (!part_sz || part_sz > remains) {
			NS_ERR("bad partition size.\n");
			return -EINVAL;
		}
		ns->partitions[i].name = get_partition_name(i);
		if (!ns->partitions[i].name) {
			NS_ERR("unable to allocate memory.\n");
			return -ENOMEM;
		}
		ns->partitions[i].offset = next_offset;
		ns->partitions[i].size = part_sz;
		next_offset += ns->partitions[i].size;
		remains -= ns->partitions[i].size;
	}
	ns->nbparts = parts_num;
	if (remains) {
		if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
			NS_ERR("too many partitions.\n");
			return -EINVAL;
		}
		ns->partitions[i].name = get_partition_name(i);
		if (!ns->partitions[i].name) {
			NS_ERR("unable to allocate memory.\n");
			return -ENOMEM;
		}
		ns->partitions[i].offset = next_offset;
		ns->partitions[i].size = remains;
		ns->nbparts += 1;
	}

	if (ns->busw == 16)
		NS_WARN("16-bit flash support wasn't tested\n");

	printk("flash size: %llu MiB\n",
			(unsigned long long)ns->geom.totsz >> 20);
	printk("page size: %u bytes\n", ns->geom.pgsz);
	printk("OOB area size: %u bytes\n", ns->geom.oobsz);
	printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
	printk("pages number: %u\n", ns->geom.pgnum);
	printk("pages per sector: %u\n", ns->geom.pgsec);
	printk("bus width: %u\n", ns->busw);
	printk("bits in sector size: %u\n", ns->geom.secshift);
	printk("bits in page size: %u\n", ns->geom.pgshift);
	printk("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
	printk("flash size with OOB: %llu KiB\n",
			(unsigned long long)ns->geom.totszoob >> 10);
	printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
	printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
	printk("options: %#x\n", ns->options);

	if ((ret = alloc_device(ns)) != 0)
		return ret;

	ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
	if (!ns->buf.byte) {
		NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
			ns->geom.pgszoob);
		return -ENOMEM;
	}
	memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);

	return 0;
}

static void free_nandsim(struct nandsim *ns)
{
	kfree(ns->buf.byte);
	free_device(ns);

	return;
}

static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
{
	char *w;
	int zero_ok;
	unsigned int erase_block_no;
	loff_t offset;

	if (!badblocks)
		return 0;
	w = badblocks;
	do {
		zero_ok = (*w == '0' ? 1 : 0);
		erase_block_no = simple_strtoul(w, &w, 0);
		if (!zero_ok && !erase_block_no) {
			NS_ERR("invalid badblocks.\n");
			return -EINVAL;
		}
		offset = (loff_t)erase_block_no * ns->geom.secsz;
		if (mtd_block_markbad(mtd, offset)) {
			NS_ERR("invalid badblocks.\n");
			return -EINVAL;
		}
		if (*w == ',')
			w += 1;
	} while (*w);
	return 0;
}

static int parse_weakblocks(void)
{
	char *w;
	int zero_ok;
	unsigned int erase_block_no;
	unsigned int max_erases;
	struct weak_block *wb;

	if (!weakblocks)
		return 0;
	w = weakblocks;
	do {
		zero_ok = (*w == '0' ? 1 : 0);
		erase_block_no = simple_strtoul(w, &w, 0);
		if (!zero_ok && !erase_block_no) {
			NS_ERR("invalid weakblocks.\n");
			return -EINVAL;
		}
		max_erases = 3;
		if (*w == ':') {
			w += 1;
			max_erases = simple_strtoul(w, &w, 0);
		}
		if (*w == ',')
			w += 1;
		wb = kzalloc(sizeof(*wb), GFP_KERNEL);
		if (!wb) {
			NS_ERR("unable to allocate memory.\n");
			return -ENOMEM;
		}
		wb->erase_block_no = erase_block_no;
		wb->max_erases = max_erases;
		list_add(&wb->list, &weak_blocks);
	} while (*w);
	return 0;
}

static int erase_error(unsigned int erase_block_no)
{
	struct weak_block *wb;

	list_for_each_entry(wb, &weak_blocks, list)
		if (wb->erase_block_no == erase_block_no) {
			if (wb->erases_done >= wb->max_erases)
				return 1;
			wb->erases_done += 1;
			return 0;
		}
	return 0;
}

static int parse_weakpages(void)
{
	char *w;
	int zero_ok;
	unsigned int page_no;
	unsigned int max_writes;
	struct weak_page *wp;

	if (!weakpages)
		return 0;
	w = weakpages;
	do {
		zero_ok = (*w == '0' ? 1 : 0);
		page_no = simple_strtoul(w, &w, 0);
		if (!zero_ok && !page_no) {
			NS_ERR("invalid weakpages.\n");
			return -EINVAL;
		}
		max_writes = 3;
		if (*w == ':') {
			w += 1;
			max_writes = simple_strtoul(w, &w, 0);
		}
		if (*w == ',')
			w += 1;
		wp = kzalloc(sizeof(*wp), GFP_KERNEL);
		if (!wp) {
			NS_ERR("unable to allocate memory.\n");
			return -ENOMEM;
		}
		wp->page_no = page_no;
		wp->max_writes = max_writes;
		list_add(&wp->list, &weak_pages);
	} while (*w);
	return 0;
}

static int write_error(unsigned int page_no)
{
	struct weak_page *wp;

	list_for_each_entry(wp, &weak_pages, list)
		if (wp->page_no == page_no) {
			if (wp->writes_done >= wp->max_writes)
				return 1;
			wp->writes_done += 1;
			return 0;
		}
	return 0;
}

static int parse_gravepages(void)
{
	char *g;
	int zero_ok;
	unsigned int page_no;
	unsigned int max_reads;
	struct grave_page *gp;

	if (!gravepages)
		return 0;
	g = gravepages;
	do {
		zero_ok = (*g == '0' ? 1 : 0);
		page_no = simple_strtoul(g, &g, 0);
		if (!zero_ok && !page_no) {
			NS_ERR("invalid gravepages.\n");
			return -EINVAL;
		}
		max_reads = 3;
		if (*g == ':') {
			g += 1;
			max_reads = simple_strtoul(g, &g, 0);
		}
		if (*g == ',')
			g += 1;
		gp = kzalloc(sizeof(*gp), GFP_KERNEL);
		if (!gp) {
			NS_ERR("unable to allocate memory.\n");
			return -ENOMEM;
		}
		gp->page_no = page_no;
		gp->max_reads = max_reads;
		list_add(&gp->list, &grave_pages);
	} while (*g);
	return 0;
}

static int read_error(unsigned int page_no)
{
	struct grave_page *gp;

	list_for_each_entry(gp, &grave_pages, list)
		if (gp->page_no == page_no) {
			if (gp->reads_done >= gp->max_reads)
				return 1;
			gp->reads_done += 1;
			return 0;
		}
	return 0;
}

static void free_lists(void)
{
	struct list_head *pos, *n;
	list_for_each_safe(pos, n, &weak_blocks) {
		list_del(pos);
		kfree(list_entry(pos, struct weak_block, list));
	}
	list_for_each_safe(pos, n, &weak_pages) {
		list_del(pos);
		kfree(list_entry(pos, struct weak_page, list));
	}
	list_for_each_safe(pos, n, &grave_pages) {
		list_del(pos);
		kfree(list_entry(pos, struct grave_page, list));
	}
	kfree(erase_block_wear);
}

static int setup_wear_reporting(struct mtd_info *mtd)
{
	size_t mem;

	wear_eb_count = div_u64(mtd->size, mtd->erasesize);
	mem = wear_eb_count * sizeof(unsigned long);
	if (mem / sizeof(unsigned long) != wear_eb_count) {
		NS_ERR("Too many erase blocks for wear reporting\n");
		return -ENOMEM;
	}
	erase_block_wear = kzalloc(mem, GFP_KERNEL);
	if (!erase_block_wear) {
		NS_ERR("Too many erase blocks for wear reporting\n");
		return -ENOMEM;
	}
	return 0;
}

static void update_wear(unsigned int erase_block_no)
{
	if (!erase_block_wear)
		return;
	total_wear += 1;

	if (total_wear == 0)
		NS_ERR("Erase counter total overflow\n");
	erase_block_wear[erase_block_no] += 1;
	if (erase_block_wear[erase_block_no] == 0)
		NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
}

static char *get_state_name(uint32_t state)
{
	switch (NS_STATE(state)) {
	case STATE_CMD_READ0:
		return "STATE_CMD_READ0";
	case STATE_CMD_READ1:
		return "STATE_CMD_READ1";
	case STATE_CMD_PAGEPROG:
		return "STATE_CMD_PAGEPROG";
	case STATE_CMD_READOOB:
		return "STATE_CMD_READOOB";
	case STATE_CMD_READSTART:
		return "STATE_CMD_READSTART";
	case STATE_CMD_ERASE1:
		return "STATE_CMD_ERASE1";
	case STATE_CMD_STATUS:
		return "STATE_CMD_STATUS";
	case STATE_CMD_SEQIN:
		return "STATE_CMD_SEQIN";
	case STATE_CMD_READID:
		return "STATE_CMD_READID";
	case STATE_CMD_ERASE2:
		return "STATE_CMD_ERASE2";
	case STATE_CMD_RESET:
		return "STATE_CMD_RESET";
	case STATE_CMD_RNDOUT:
		return "STATE_CMD_RNDOUT";
	case STATE_CMD_RNDOUTSTART:
		return "STATE_CMD_RNDOUTSTART";
	case STATE_ADDR_PAGE:
		return "STATE_ADDR_PAGE";
	case STATE_ADDR_SEC:
		return "STATE_ADDR_SEC";
	case STATE_ADDR_ZERO:
		return "STATE_ADDR_ZERO";
	case STATE_ADDR_COLUMN:
		return "STATE_ADDR_COLUMN";
	case STATE_DATAIN:
		return "STATE_DATAIN";
	case STATE_DATAOUT:
		return "STATE_DATAOUT";
	case STATE_DATAOUT_ID:
		return "STATE_DATAOUT_ID";
	case STATE_DATAOUT_STATUS:
		return "STATE_DATAOUT_STATUS";
	case STATE_READY:
		return "STATE_READY";
	case STATE_UNKNOWN:
		return "STATE_UNKNOWN";
	}

	NS_ERR("get_state_name: unknown state, BUG\n");
	return NULL;
}

static int check_command(int cmd)
{
	switch (cmd) {
	case NAND_CMD_READ0:
	case NAND_CMD_READ1:
	case NAND_CMD_READSTART:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_READOOB:
	case NAND_CMD_ERASE1:
	case NAND_CMD_STATUS:
	case NAND_CMD_SEQIN:
	case NAND_CMD_READID:
	case NAND_CMD_ERASE2:
	case NAND_CMD_RESET:
	case NAND_CMD_RNDOUT:
	case NAND_CMD_RNDOUTSTART:
		return 0;

	default:
		return 1;
	}
}

static uint32_t get_state_by_command(unsigned command)
{
	switch (command) {
	case NAND_CMD_READ0:
		return STATE_CMD_READ0;
	case NAND_CMD_READ1:
		return STATE_CMD_READ1;
	case NAND_CMD_PAGEPROG:
		return STATE_CMD_PAGEPROG;
	case NAND_CMD_READSTART:
		return STATE_CMD_READSTART;
	case NAND_CMD_READOOB:
		return STATE_CMD_READOOB;
	case NAND_CMD_ERASE1:
		return STATE_CMD_ERASE1;
	case NAND_CMD_STATUS:
		return STATE_CMD_STATUS;
	case NAND_CMD_SEQIN:
		return STATE_CMD_SEQIN;
	case NAND_CMD_READID:
		return STATE_CMD_READID;
	case NAND_CMD_ERASE2:
		return STATE_CMD_ERASE2;
	case NAND_CMD_RESET:
		return STATE_CMD_RESET;
	case NAND_CMD_RNDOUT:
		return STATE_CMD_RNDOUT;
	case NAND_CMD_RNDOUTSTART:
		return STATE_CMD_RNDOUTSTART;
	}

	NS_ERR("get_state_by_command: unknown command, BUG\n");
	return 0;
}

static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
{
	uint byte = (uint)bt;

	if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
		ns->regs.column |= (byte << 8 * ns->regs.count);
	else {
		ns->regs.row |= (byte << 8 * (ns->regs.count -
					      ns->geom.pgaddrbytes +
					      ns->geom.secaddrbytes));
	}

	return;
}

static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
{
	NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));

	ns->state = STATE_READY;
	ns->nxstate = STATE_UNKNOWN;
	ns->op = NULL;
	ns->npstates = 0;
	ns->stateidx = 0;
	ns->regs.num = 0;
	ns->regs.count = 0;
	ns->regs.off = 0;
	ns->regs.row = 0;
	ns->regs.column = 0;
	ns->regs.status = status;
}

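/*
 * Try to identify which operation the chip is performing from the
 * sequence of states seen so far (ns->pstates plus the current state).
 * If 'flag' is non-zero, the byte just received is an address byte and
 * the next expected state must be an address input state.
 *
 * Returns 0 when exactly one matching operation is found, -1 when the
 * sequence is still ambiguous, and -2 on failure (no matching
 * operation, the simulator is switched back to STATE_READY).
 */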
static int find_operation(struct nandsim *ns, uint32_t flag)
{
	int opsfound = 0;
	int i, j, idx = 0;

	for (i = 0; i < NS_OPER_NUM; i++) {

		int found = 1;

		if (!(ns->options & ops[i].reqopts))
			continue;

		if (flag) {
			if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
				continue;
		} else {
			if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
				continue;
		}

		for (j = 0; j < ns->npstates; j++)
			if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
				&& (ns->options & ops[idx].reqopts)) {
				found = 0;
				break;
			}

		if (found) {
			idx = i;
			opsfound += 1;
		}
	}

	if (opsfound == 1) {
		ns->op = &ops[idx].states[0];
		if (flag) {
			ns->stateidx = ns->npstates - 1;
		} else {
			ns->stateidx = ns->npstates;
		}
		ns->npstates = 0;
		ns->state = ns->op[ns->stateidx];
		ns->nxstate = ns->op[ns->stateidx + 1];
		NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
			idx, get_state_name(ns->state), get_state_name(ns->nxstate));
		return 0;
	}

	if (opsfound == 0) {
		if (ns->npstates != 0) {
			NS_DBG("find_operation: no operation found, try again with state %s\n",
				get_state_name(ns->state));
			ns->npstates = 0;
			return find_operation(ns, 0);

		}
		NS_DBG("find_operation: no operations found\n");
		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
		return -2;
	}

	if (flag) {
		NS_DBG("find_operation: BUG, operation must be known if address is input\n");
		return -2;
	}

	NS_DBG("find_operation: there is still ambiguity\n");

	ns->pstates[ns->npstates++] = ns->state;

	return -1;
}

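/*
 * When the simulated flash is backed by a cache file, the page-cache
 * pages of that file which are about to be accessed are looked up (and,
 * if necessary, created) and pinned here first, so that the subsequent
 * kernel_read()/kernel_write() calls should not need to allocate
 * page-cache memory while the MTD operation is in progress.
 */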
static void put_pages(struct nandsim *ns)
{
	int i;

	for (i = 0; i < ns->held_cnt; i++)
		put_page(ns->held_pages[i]);
}

static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
{
	pgoff_t index, start_index, end_index;
	struct page *page;
	struct address_space *mapping = file->f_mapping;

	start_index = pos >> PAGE_SHIFT;
	end_index = (pos + count - 1) >> PAGE_SHIFT;
	if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
		return -EINVAL;
	ns->held_cnt = 0;
	for (index = start_index; index <= end_index; index++) {
		page = find_get_page(mapping, index);
		if (page == NULL) {
			page = find_or_create_page(mapping, index, GFP_NOFS);
			if (page == NULL) {
				write_inode_now(mapping->host, 1);
				page = find_or_create_page(mapping, index, GFP_NOFS);
			}
			if (page == NULL) {
				put_pages(ns);
				return -ENOMEM;
			}
			unlock_page(page);
		}
		ns->held_pages[ns->held_cnt++] = page;
	}
	return 0;
}

static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
	ssize_t tx;
	int err;
	unsigned int noreclaim_flag;

	err = get_pages(ns, file, count, pos);
	if (err)
		return err;
	noreclaim_flag = memalloc_noreclaim_save();
	tx = kernel_read(file, pos, buf, count);
	memalloc_noreclaim_restore(noreclaim_flag);
	put_pages(ns);
	return tx;
}

static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
	ssize_t tx;
	int err;
	unsigned int noreclaim_flag;

	err = get_pages(ns, file, count, pos);
	if (err)
		return err;
	noreclaim_flag = memalloc_noreclaim_save();
	tx = kernel_write(file, buf, count, pos);
	memalloc_noreclaim_restore(noreclaim_flag);
	put_pages(ns);
	return tx;
}

static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
{
	return &(ns->pages[ns->regs.row]);
}

static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
{
	return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
}

static int do_read_error(struct nandsim *ns, int num)
{
	unsigned int page_no = ns->regs.row;

	if (read_error(page_no)) {
		prandom_bytes(ns->buf.byte, num);
		NS_WARN("simulating read error in page %u\n", page_no);
		return 1;
	}
	return 0;
}

static void do_bit_flips(struct nandsim *ns, int num)
{
	if (bitflips && prandom_u32() < (1 << 22)) {
		int flips = 1;
		if (bitflips > 1)
			flips = (prandom_u32() % (int) bitflips) + 1;
		while (flips--) {
			int pos = prandom_u32() % (num * 8);
			ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
			NS_WARN("read_page: flipping bit %d in page %d "
				"reading from %d ecc: corrected=%u failed=%u\n",
				pos, ns->regs.row, ns->regs.column + ns->regs.off,
				nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
		}
	}
}

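/*
 * Fill the internal buffer with 'num' bytes of the current page,
 * starting at the current column/offset. Unwritten pages read back as
 * all 0xFF. Simulated read errors and bit flips are applied here.
 */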
static void read_page(struct nandsim *ns, int num)
{
	union ns_mem *mypage;

	if (ns->cfile) {
		if (!test_bit(ns->regs.row, ns->pages_written)) {
			NS_DBG("read_page: page %d not written\n", ns->regs.row);
			memset(ns->buf.byte, 0xFF, num);
		} else {
			loff_t pos;
			ssize_t tx;

			NS_DBG("read_page: page %d written, reading from %d\n",
				ns->regs.row, ns->regs.column + ns->regs.off);
			if (do_read_error(ns, num))
				return;
			pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
			tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
			if (tx != num) {
				NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
				return;
			}
			do_bit_flips(ns, num);
		}
		return;
	}

	mypage = NS_GET_PAGE(ns);
	if (mypage->byte == NULL) {
		NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
		memset(ns->buf.byte, 0xFF, num);
	} else {
		NS_DBG("read_page: page %d allocated, reading from %d\n",
			ns->regs.row, ns->regs.column + ns->regs.off);
		if (do_read_error(ns, num))
			return;
		memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
		do_bit_flips(ns, num);
	}
}

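/*
 * Erase all pages of the current sector: free the lazily allocated page
 * buffers (RAM mode) or clear the "written" bits (cache file mode).
 */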
static void erase_sector(struct nandsim *ns)
{
	union ns_mem *mypage;
	int i;

	if (ns->cfile) {
		for (i = 0; i < ns->geom.pgsec; i++)
			if (__test_and_clear_bit(ns->regs.row + i,
						 ns->pages_written)) {
				NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
			}
		return;
	}

	mypage = NS_GET_PAGE(ns);
	for (i = 0; i < ns->geom.pgsec; i++) {
		if (mypage->byte != NULL) {
			NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
			kmem_cache_free(ns->nand_pages_slab, mypage->byte);
			mypage->byte = NULL;
		}
		mypage++;
	}
}

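/*
 * Program (AND in) the contents of the internal buffer into the current
 * page, allocating the page (RAM mode) or reading, merging and writing
 * it back (cache file mode) as needed. Returns 0 on success, -1 on
 * failure.
 */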
static int prog_page(struct nandsim *ns, int num)
{
	int i;
	union ns_mem *mypage;
	u_char *pg_off;

	if (ns->cfile) {
		loff_t off;
		ssize_t tx;
		int all;

		NS_DBG("prog_page: writing page %d\n", ns->regs.row);
		pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
		off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
		if (!test_bit(ns->regs.row, ns->pages_written)) {
			all = 1;
			memset(ns->file_buf, 0xff, ns->geom.pgszoob);
		} else {
			all = 0;
			tx = read_file(ns, ns->cfile, pg_off, num, off);
			if (tx != num) {
				NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
				return -1;
			}
		}
		for (i = 0; i < num; i++)
			pg_off[i] &= ns->buf.byte[i];
		if (all) {
			loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
			tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
			if (tx != ns->geom.pgszoob) {
				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
				return -1;
			}
			__set_bit(ns->regs.row, ns->pages_written);
		} else {
			tx = write_file(ns, ns->cfile, pg_off, num, off);
			if (tx != num) {
				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
				return -1;
			}
		}
		return 0;
	}

	mypage = NS_GET_PAGE(ns);
	if (mypage->byte == NULL) {
		NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
		mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
		if (mypage->byte == NULL) {
			NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
			return -1;
		}
		memset(mypage->byte, 0xFF, ns->geom.pgszoob);
	}

	pg_off = NS_PAGE_BYTE_OFF(ns);
	for (i = 0; i < num; i++)
		pg_off[i] &= ns->buf.byte[i];

	return 0;
}

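/*
 * Perform the action associated with the current state (copy a page to
 * the internal buffer, program a page, erase a sector, or adjust the
 * internal offset). Returns -1 on error, 0 otherwise.
 */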
static int do_state_action(struct nandsim *ns, uint32_t action)
{
	int num;
	int busdiv = ns->busw == 8 ? 1 : 2;
	unsigned int erase_block_no, page_no;

	action &= ACTION_MASK;

	if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
		NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
		return -1;
	}

	switch (action) {

	case ACTION_CPY:
		if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
			NS_ERR("do_state_action: column number is too large\n");
			break;
		}
		num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
		read_page(ns, num);

		NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
			num, NS_RAW_OFFSET(ns) + ns->regs.off);

		if (ns->regs.off == 0)
			NS_LOG("read page %d\n", ns->regs.row);
		else if (ns->regs.off < ns->geom.pgsz)
			NS_LOG("read page %d (second half)\n", ns->regs.row);
		else
			NS_LOG("read OOB of page %d\n", ns->regs.row);

		NS_UDELAY(access_delay);
		NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);

		break;

	case ACTION_SECERASE:
		if (ns->lines.wp) {
			NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
			return -1;
		}

		if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
			|| (ns->regs.row & ~(ns->geom.secsz - 1))) {
			NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
			return -1;
		}

		ns->regs.row = (ns->regs.row <<
				8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
		ns->regs.column = 0;

		erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);

		NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
				ns->regs.row, NS_RAW_OFFSET(ns));
		NS_LOG("erase sector %u\n", erase_block_no);

		erase_sector(ns);

		NS_MDELAY(erase_delay);

		if (erase_block_wear)
			update_wear(erase_block_no);

		if (erase_error(erase_block_no)) {
			NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
			return -1;
		}

		break;

	case ACTION_PRGPAGE:
		if (ns->lines.wp) {
			NS_WARN("do_state_action: device is write-protected, ignore program\n");
			return -1;
		}

		num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
		if (num != ns->regs.count) {
			NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
					ns->regs.count, num);
			return -1;
		}

		if (prog_page(ns, num) == -1)
			return -1;

		page_no = ns->regs.row;

		NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
			num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
		NS_LOG("program page %d\n", ns->regs.row);

		NS_UDELAY(programm_delay);
		NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);

		if (write_error(page_no)) {
			NS_WARN("simulating write failure in page %u\n", page_no);
			return -1;
		}

		break;

	case ACTION_ZEROOFF:
		NS_DBG("do_state_action: set internal offset to 0\n");
		ns->regs.off = 0;
		break;

	case ACTION_HALFOFF:
		if (!(ns->options & OPT_PAGE512_8BIT)) {
			NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
				" byte page size 8x chips\n");
			return -1;
		}
		NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
		ns->regs.off = ns->geom.pgsz/2;
		break;

	case ACTION_OOBOFF:
		NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
		ns->regs.off = ns->geom.pgsz;
		break;

	default:
		NS_DBG("do_state_action: BUG! unknown action\n");
	}

	return 0;
}

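/*
 * Advance the simulator to the next state of the current operation, or
 * identify the operation from the command just received when none is in
 * progress, and set up the register file for the address or data bytes
 * that follow.
 */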
static void switch_state(struct nandsim *ns)
{
	if (ns->op) {
		ns->stateidx += 1;
		ns->state = ns->nxstate;
		ns->nxstate = ns->op[ns->stateidx + 1];

		NS_DBG("switch_state: operation is known, switch to the next state, "
			"state: %s, nxstate: %s\n",
			get_state_name(ns->state), get_state_name(ns->nxstate));

		if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
			return;
		}

	} else {
		ns->state = get_state_by_command(ns->regs.command);

		NS_DBG("switch_state: operation is unknown, try to find it\n");

		if (find_operation(ns, 0) != 0)
			return;

		if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
			return;
		}
	}

	if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
		NS_DBG("switch_state: double the column number for 16x device\n");
		ns->regs.column <<= 1;
	}

	if (NS_STATE(ns->nxstate) == STATE_READY) {
		u_char status = NS_STATUS_OK(ns);

		if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
			&& ns->regs.count != ns->regs.num) {
			NS_WARN("switch_state: not all bytes were processed, %d left\n",
					ns->regs.num - ns->regs.count);
			status = NS_STATUS_FAILED(ns);
		}

		NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");

		switch_to_ready_state(ns, status);

		return;
	} else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
		ns->state = ns->nxstate;
		ns->nxstate = ns->op[++ns->stateidx + 1];
		ns->regs.num = ns->regs.count = 0;

		NS_DBG("switch_state: the next state is data I/O, switch, "
			"state: %s, nxstate: %s\n",
			get_state_name(ns->state), get_state_name(ns->nxstate));

		switch (NS_STATE(ns->state)) {
		case STATE_DATAIN:
		case STATE_DATAOUT:
			ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
			break;

		case STATE_DATAOUT_ID:
			ns->regs.num = ns->geom.idbytes;
			break;

		case STATE_DATAOUT_STATUS:
			ns->regs.count = ns->regs.num = 0;
			break;

		default:
			NS_ERR("switch_state: BUG! unknown data state\n");
		}

	} else if (ns->nxstate & STATE_ADDR_MASK) {
		ns->regs.count = 0;

		switch (NS_STATE(ns->nxstate)) {
		case STATE_ADDR_PAGE:
			ns->regs.num = ns->geom.pgaddrbytes;

			break;
		case STATE_ADDR_SEC:
			ns->regs.num = ns->geom.secaddrbytes;
			break;

		case STATE_ADDR_ZERO:
			ns->regs.num = 1;
			break;

		case STATE_ADDR_COLUMN:
			ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
			break;

		default:
			NS_ERR("switch_state: BUG! unknown address state\n");
		}
	} else {
		ns->regs.num = 0;
		ns->regs.count = 0;
	}
}

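/*
 * MTD/NAND interface: read one byte (status, ID or data) from the
 * simulated chip, advancing the internal state machine as needed.
 */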
static u_char ns_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nandsim *ns = nand_get_controller_data(chip);
	u_char outb = 0x00;

	if (!ns->lines.ce) {
		NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
		return outb;
	}
	if (ns->lines.ale || ns->lines.cle) {
		NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
		return outb;
	}
	if (!(ns->state & STATE_DATAOUT_MASK)) {
		NS_WARN("read_byte: unexpected data output cycle, state is %s "
			"return %#x\n", get_state_name(ns->state), (uint)outb);
		return outb;
	}

	if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
		NS_DBG("read_byte: return %#x status\n", ns->regs.status);
		return ns->regs.status;
	}

	if (ns->regs.count == ns->regs.num) {
		NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
		return outb;
	}

	switch (NS_STATE(ns->state)) {
	case STATE_DATAOUT:
		if (ns->busw == 8) {
			outb = ns->buf.byte[ns->regs.count];
			ns->regs.count += 1;
		} else {
			outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
			ns->regs.count += 2;
		}
		break;
	case STATE_DATAOUT_ID:
		NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
		outb = ns->ids[ns->regs.count];
		ns->regs.count += 1;
		break;
	default:
		BUG();
	}

	if (ns->regs.count == ns->regs.num) {
		NS_DBG("read_byte: all bytes were read\n");

		if (NS_STATE(ns->nxstate) == STATE_READY)
			switch_state(ns);
	}

	return outb;
}

static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nandsim *ns = nand_get_controller_data(chip);

	if (!ns->lines.ce) {
		NS_ERR("write_byte: chip is disabled, ignore write\n");
		return;
	}
	if (ns->lines.ale && ns->lines.cle) {
		NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
		return;
	}

	if (ns->lines.cle == 1) {
		if (byte == NAND_CMD_RESET) {
			NS_LOG("reset chip\n");
			switch_to_ready_state(ns, NS_STATUS_OK(ns));
			return;
		}

		if (check_command(byte)) {
			NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
			return;
		}

		if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
			|| NS_STATE(ns->state) == STATE_DATAOUT) {
			int row = ns->regs.row;

			switch_state(ns);
			if (byte == NAND_CMD_RNDOUT)
				ns->regs.row = row;
		}

		if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
			if (!(ns->regs.command == NAND_CMD_READID &&
			    NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
				NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
					"ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
			}
			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
		}

		NS_DBG("command byte corresponding to %s state accepted\n",
			get_state_name(get_state_by_command(byte)));
		ns->regs.command = byte;
		switch_state(ns);

	} else if (ns->lines.ale == 1) {
		if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {

			NS_DBG("write_byte: operation isn't known yet, identify it\n");

			if (find_operation(ns, 1) < 0)
				return;

			if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
				switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
				return;
			}

			ns->regs.count = 0;
			switch (NS_STATE(ns->nxstate)) {
			case STATE_ADDR_PAGE:
				ns->regs.num = ns->geom.pgaddrbytes;
				break;
			case STATE_ADDR_SEC:
				ns->regs.num = ns->geom.secaddrbytes;
				break;
			case STATE_ADDR_ZERO:
				ns->regs.num = 1;
				break;
			default:
				BUG();
			}
		}

		if (!(ns->nxstate & STATE_ADDR_MASK)) {
			NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
				"switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
			return;
		}

		if (ns->regs.count == ns->regs.num) {
			NS_ERR("write_byte: no more address bytes expected\n");
			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
			return;
		}

		accept_addr_byte(ns, byte);

		ns->regs.count += 1;

		NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
				(uint)byte, ns->regs.count, ns->regs.num);

		if (ns->regs.count == ns->regs.num) {
			NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
			switch_state(ns);
		}

	} else {
		if (!(ns->state & STATE_DATAIN_MASK)) {
			NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
				"switch to %s\n", (uint)byte,
				get_state_name(ns->state), get_state_name(STATE_READY));
			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
			return;
		}

		if (ns->regs.count == ns->regs.num) {
			NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
				ns->regs.num);
			return;
		}

		if (ns->busw == 8) {
			ns->buf.byte[ns->regs.count] = byte;
			ns->regs.count += 1;
		} else {
			ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
			ns->regs.count += 2;
		}
	}

	return;
}

static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nandsim *ns = nand_get_controller_data(chip);

	ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
	ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
	ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;

	if (cmd != NAND_CMD_NONE)
		ns_nand_write_byte(mtd, cmd);
}

static int ns_device_ready(struct mtd_info *mtd)
{
	NS_DBG("device_ready\n");
	return 1;
}

static uint16_t ns_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	NS_DBG("read_word\n");

	return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
}

static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nandsim *ns = nand_get_controller_data(chip);

	if (!(ns->state & STATE_DATAIN_MASK)) {
		NS_ERR("write_buf: data input isn't expected, state is %s, "
			"switch to STATE_READY\n", get_state_name(ns->state));
		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
		return;
	}

	if (ns->regs.count + len > ns->regs.num) {
		NS_ERR("write_buf: too many input bytes\n");
		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
		return;
	}

	memcpy(ns->buf.byte + ns->regs.count, buf, len);
	ns->regs.count += len;

	if (ns->regs.count == ns->regs.num) {
		NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
	}
}

static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nandsim *ns = nand_get_controller_data(chip);

	if (!ns->lines.ce) {
		NS_ERR("read_buf: chip is disabled\n");
		return;
	}
	if (ns->lines.ale || ns->lines.cle) {
		NS_ERR("read_buf: ALE or CLE pin is high\n");
		return;
	}
	if (!(ns->state & STATE_DATAOUT_MASK)) {
		NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
			get_state_name(ns->state));
		return;
	}

	if (NS_STATE(ns->state) != STATE_DATAOUT) {
		int i;

		for (i = 0; i < len; i++)
			buf[i] = mtd_to_nand(mtd)->read_byte(mtd);

		return;
	}

	if (ns->regs.count + len > ns->regs.num) {
		NS_ERR("read_buf: too many bytes to read\n");
		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
		return;
	}

	memcpy(buf, ns->buf.byte + ns->regs.count, len);
	ns->regs.count += len;

	if (ns->regs.count == ns->regs.num) {
		if (NS_STATE(ns->nxstate) == STATE_READY)
			switch_state(ns);
	}

	return;
}

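/*
 * Module initialization: allocate the nand_chip/nandsim structures,
 * hook up the simulator callbacks, run the NAND scan, configure ECC and
 * optional features from the module parameters, and register the MTD
 * device with its partitions.
 */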
static int __init ns_init_module(void)
{
	struct nand_chip *chip;
	struct nandsim *nand;
	int retval = -ENOMEM, i;

	if (bus_width != 8 && bus_width != 16) {
		NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
		return -EINVAL;
	}

	chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
		       GFP_KERNEL);
	if (!chip) {
		NS_ERR("unable to allocate core structures.\n");
		return -ENOMEM;
	}
	nsmtd = nand_to_mtd(chip);
	nand = (struct nandsim *)(chip + 1);
	nand_set_controller_data(chip, (void *)nand);

	chip->cmd_ctrl = ns_hwcontrol;
	chip->read_byte = ns_nand_read_byte;
	chip->dev_ready = ns_device_ready;
	chip->write_buf = ns_nand_write_buf;
	chip->read_buf = ns_nand_read_buf;
	chip->read_word = ns_nand_read_word;
	chip->ecc.mode = NAND_ECC_SOFT;
	chip->ecc.algo = NAND_ECC_HAMMING;

	chip->options |= NAND_SKIP_BBTSCAN;

	switch (bbt) {
	case 2:
		chip->bbt_options |= NAND_BBT_NO_OOB;
		/* fall through */
	case 1:
		chip->bbt_options |= NAND_BBT_USE_FLASH;
		/* fall through */
	case 0:
		break;
	default:
		NS_ERR("bbt has to be 0..2\n");
		retval = -EINVAL;
		goto error;
	}

	if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
		nand->geom.idbytes = 8;
	else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
		nand->geom.idbytes = 6;
	else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
		nand->geom.idbytes = 4;
	else
		nand->geom.idbytes = 2;
	nand->regs.status = NS_STATUS_OK(nand);
	nand->nxstate = STATE_UNKNOWN;
	nand->options |= OPT_PAGE512;
	memcpy(nand->ids, id_bytes, sizeof(nand->ids));
	if (bus_width == 16) {
		nand->busw = 16;
		chip->options |= NAND_BUSWIDTH_16;
	}

	nsmtd->owner = THIS_MODULE;

	if ((retval = parse_weakblocks()) != 0)
		goto error;

	if ((retval = parse_weakpages()) != 0)
		goto error;

	if ((retval = parse_gravepages()) != 0)
		goto error;

	retval = nand_scan_ident(nsmtd, 1, NULL);
	if (retval) {
		NS_ERR("cannot scan NAND Simulator device\n");
		goto error;
	}

	if (bch) {
		unsigned int eccsteps, eccbytes;
		if (!mtd_nand_has_bch()) {
			NS_ERR("BCH ECC support is disabled\n");
			retval = -EINVAL;
			goto error;
		}

		eccsteps = nsmtd->writesize/512;
		eccbytes = (bch*13+7)/8;

		if ((nsmtd->oobsize < 64) || !eccsteps) {
			NS_ERR("bch not available on small page devices\n");
			retval = -EINVAL;
			goto error;
		}
		if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
			NS_ERR("invalid bch value %u\n", bch);
			retval = -EINVAL;
			goto error;
		}
		chip->ecc.mode = NAND_ECC_SOFT;
		chip->ecc.algo = NAND_ECC_BCH;
		chip->ecc.size = 512;
		chip->ecc.strength = bch;
		chip->ecc.bytes = eccbytes;
		NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
	}

	retval = nand_scan_tail(nsmtd);
	if (retval) {
		NS_ERR("can't register NAND Simulator\n");
		goto error;
	}

	if (overridesize) {
		uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
		if (new_size >> overridesize != nsmtd->erasesize) {
			NS_ERR("overridesize is too big\n");
			retval = -EINVAL;
			goto err_exit;
		}

		nsmtd->size = new_size;
		chip->chipsize = new_size;
		chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
		chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
	}

	if ((retval = setup_wear_reporting(nsmtd)) != 0)
		goto err_exit;

	if ((retval = nandsim_debugfs_create(nand)) != 0)
		goto err_exit;

	if ((retval = init_nandsim(nsmtd)) != 0)
		goto err_exit;

	if ((retval = chip->scan_bbt(nsmtd)) != 0)
		goto err_exit;

	if ((retval = parse_badblocks(nand, nsmtd)) != 0)
		goto err_exit;

	retval = mtd_device_register(nsmtd, &nand->partitions[0],
				     nand->nbparts);
	if (retval != 0)
		goto err_exit;

	return 0;

err_exit:
	nandsim_debugfs_remove(nand);
	free_nandsim(nand);
	nand_release(nsmtd);
	for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
		kfree(nand->partitions[i].name);
error:
	kfree(chip);
	free_lists();

	return retval;
}

module_init(ns_init_module);

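/*
 * Module clean-up function: undo everything ns_init_module() set up.
 */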
static void __exit ns_cleanup_module(void)
{
	struct nand_chip *chip = mtd_to_nand(nsmtd);
	struct nandsim *ns = nand_get_controller_data(chip);
	int i;

	nandsim_debugfs_remove(ns);
	free_nandsim(ns);
	nand_release(nsmtd);
	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
		kfree(ns->partitions[i].name);
	kfree(mtd_to_nand(nsmtd));
	free_lists();
}

module_exit(ns_cleanup_module);

MODULE_LICENSE ("GPL");
MODULE_AUTHOR ("Artem B. Bityuckiy");
MODULE_DESCRIPTION ("The NAND flash simulator");