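/*
 * NAND flash simulator (nandsim).
 *
 * Emulates a NAND flash chip backed either by RAM (a kmem_cache of page
 * buffers) or by an optional cache file (cache_file=), and registers the
 * result as an MTD device.
 */
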
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
47
48
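/*
 * Default simulator parameter values, used when the corresponding
 * CONFIG_NANDSIM_* Kconfig options are not defined.
 */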
49#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
50 !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
51 !defined(CONFIG_NANDSIM_THIRD_ID_BYTE) || \
52 !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
53#define CONFIG_NANDSIM_FIRST_ID_BYTE 0x98
54#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
55#define CONFIG_NANDSIM_THIRD_ID_BYTE 0xFF
56#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF
57#endif
58
59#ifndef CONFIG_NANDSIM_ACCESS_DELAY
60#define CONFIG_NANDSIM_ACCESS_DELAY 25
61#endif
62#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
63#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
64#endif
65#ifndef CONFIG_NANDSIM_ERASE_DELAY
66#define CONFIG_NANDSIM_ERASE_DELAY 2
67#endif
68#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
69#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
70#endif
71#ifndef CONFIG_NANDSIM_INPUT_CYCLE
72#define CONFIG_NANDSIM_INPUT_CYCLE 50
73#endif
74#ifndef CONFIG_NANDSIM_BUS_WIDTH
75#define CONFIG_NANDSIM_BUS_WIDTH 8
76#endif
77#ifndef CONFIG_NANDSIM_DO_DELAYS
78#define CONFIG_NANDSIM_DO_DELAYS 0
79#endif
80#ifndef CONFIG_NANDSIM_LOG
81#define CONFIG_NANDSIM_LOG 0
82#endif
83#ifndef CONFIG_NANDSIM_DBG
84#define CONFIG_NANDSIM_DBG 0
85#endif
86#ifndef CONFIG_NANDSIM_MAX_PARTS
87#define CONFIG_NANDSIM_MAX_PARTS 32
88#endif
89
90static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
91static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
92static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
93static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
94static uint access_delay = CONFIG_NANDSIM_ACCESS_DELAY;
95static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
96static uint erase_delay = CONFIG_NANDSIM_ERASE_DELAY;
97static uint output_cycle = CONFIG_NANDSIM_OUTPUT_CYCLE;
98static uint input_cycle = CONFIG_NANDSIM_INPUT_CYCLE;
99static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
100static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
101static uint log = CONFIG_NANDSIM_LOG;
102static uint dbg = CONFIG_NANDSIM_DBG;
103static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
104static unsigned int parts_num;
105static char *badblocks = NULL;
106static char *weakblocks = NULL;
107static char *weakpages = NULL;
108static unsigned int bitflips = 0;
109static char *gravepages = NULL;
110static unsigned int overridesize = 0;
111static char *cache_file = NULL;
112static unsigned int bbt;
113static unsigned int bch;
114
115module_param(first_id_byte, uint, 0400);
116module_param(second_id_byte, uint, 0400);
117module_param(third_id_byte, uint, 0400);
118module_param(fourth_id_byte, uint, 0400);
119module_param(access_delay, uint, 0400);
120module_param(programm_delay, uint, 0400);
121module_param(erase_delay, uint, 0400);
122module_param(output_cycle, uint, 0400);
123module_param(input_cycle, uint, 0400);
124module_param(bus_width, uint, 0400);
125module_param(do_delays, uint, 0400);
126module_param(log, uint, 0400);
127module_param(dbg, uint, 0400);
128module_param_array(parts, ulong, &parts_num, 0400);
129module_param(badblocks, charp, 0400);
130module_param(weakblocks, charp, 0400);
131module_param(weakpages, charp, 0400);
132module_param(bitflips, uint, 0400);
133module_param(gravepages, charp, 0400);
134module_param(overridesize, uint, 0400);
135module_param(cache_file, charp, 0400);
136module_param(bbt, uint, 0400);
137module_param(bch, uint, 0400);
138
139MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
140MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
141MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
142MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
143MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
145MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
146MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
147MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
148MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
149MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
150MODULE_PARM_DESC(log, "Perform logging if not zero");
151MODULE_PARM_DESC(dbg, "Output debug information if not zero");
152MODULE_PARM_DESC(parts, "Partition sizes (in erase blocks) separated by commas");
153
154MODULE_PARM_DESC(badblocks, "Erase blocks that are initially marked bad, separated by commas");
155MODULE_PARM_DESC(weakblocks, "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
156 " separated by commas e.g. 113:2 means eb 113"
157 " can be erased only twice before failing");
158MODULE_PARM_DESC(weakpages, "Weak pages [: maximum writes (defaults to 3)]"
159 " separated by commas e.g. 1401:2 means page 1401"
160 " can be written only twice before failing");
161MODULE_PARM_DESC(bitflips, "Maximum number of random bit flips per page (zero by default)");
162MODULE_PARM_DESC(gravepages, "Pages that lose data [: maximum reads (defaults to 3)]"
163 " separated by commas e.g. 1401:2 means page 1401"
164 " can be read only twice before failing");
165MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the ID bytes. "
166 "The size is specified in erase blocks and as the exponent of a power of two"
167 " e.g. 5 means a size of 32 erase blocks");
168MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
169MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
170MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
171 "be correctable in 512-byte blocks");
172
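/*
 * Illustrative example (parameter values are arbitrary, not a recipe for a
 * particular chip): geometry is derived from the four ID bytes by
 * nand_scan_ident(); bbt=1 keeps the bad block table in flash; block 113
 * fails after two erases; up to 2 bit flips per page read are injected;
 * two partitions of 4 and 20 erase blocks are created (the remainder
 * becomes a final partition):
 *
 *   modprobe nandsim first_id_byte=0x20 second_id_byte=0xaa \
 *            third_id_byte=0x00 fourth_id_byte=0x15 \
 *            bbt=1 weakblocks=113:2 bitflips=2 parts=4,20
 */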
173
174#define NS_LARGEST_PAGE_SIZE 4096
175
176
177#define NS_OUTPUT_PREFIX "[nandsim]"
178
179
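/* Simulator output macros, gated by the 'log' and 'dbg' module parameters */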
180#define NS_LOG(args...) \
181 do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
182#define NS_DBG(args...) \
183 do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
184#define NS_WARN(args...) \
185 do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
186#define NS_ERR(args...) \
187 do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
188#define NS_INFO(args...) \
189 do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
190
191
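/* Busy-wait delay helpers, active only when the 'do_delays' parameter is set */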
192#define NS_UDELAY(us) \
193 do { if (do_delays) udelay(us); } while(0)
194#define NS_MDELAY(us) \
195 do { if (do_delays) mdelay(us); } while(0)
196
197
198#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
199
200
201#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
202
203
204#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
205
206
207#define NS_RAW_OFFSET(ns) \
208 (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
209
210
211#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
212
213
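/* Command states of the internal state machine, one per supported NAND command */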
214#define STATE_CMD_READ0 0x00000001
215#define STATE_CMD_READ1 0x00000002
216#define STATE_CMD_READSTART 0x00000003
217#define STATE_CMD_PAGEPROG 0x00000004
218#define STATE_CMD_READOOB 0x00000005
219#define STATE_CMD_ERASE1 0x00000006
220#define STATE_CMD_STATUS 0x00000007
221#define STATE_CMD_SEQIN 0x00000009
222#define STATE_CMD_READID 0x0000000A
223#define STATE_CMD_ERASE2 0x0000000B
224#define STATE_CMD_RESET 0x0000000C
225#define STATE_CMD_RNDOUT 0x0000000D
226#define STATE_CMD_RNDOUTSTART 0x0000000E
227#define STATE_CMD_MASK 0x0000000F
228
229
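/* Address input states */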
230#define STATE_ADDR_PAGE 0x00000010
231#define STATE_ADDR_SEC 0x00000020
232#define STATE_ADDR_COLUMN 0x00000030
233#define STATE_ADDR_ZERO 0x00000040
234#define STATE_ADDR_MASK 0x00000070
235
236
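/* Data input and data output states */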
237#define STATE_DATAIN 0x00000100
238#define STATE_DATAIN_MASK 0x00000100
239
240#define STATE_DATAOUT 0x00001000
241#define STATE_DATAOUT_ID 0x00002000
242#define STATE_DATAOUT_STATUS 0x00003000
243#define STATE_DATAOUT_STATUS_M 0x00004000
244#define STATE_DATAOUT_MASK 0x00007000
245
246
247#define STATE_READY 0x00000000
248
249
250#define STATE_UNKNOWN 0x10000000
251
252
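/* Actions associated with states (encoded in the upper bits of a state word) */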
253#define ACTION_CPY 0x00100000
254#define ACTION_PRGPAGE 0x00200000
255#define ACTION_SECERASE 0x00300000
256#define ACTION_ZEROOFF 0x00400000
257#define ACTION_HALFOFF 0x00500000
258#define ACTION_OOBOFF 0x00600000
259#define ACTION_MASK 0x00700000
260
261#define NS_OPER_NUM 13
262#define NS_OPER_STATES 6
263
264#define OPT_ANY 0xFFFFFFFF
265#define OPT_PAGE512 0x00000002
266#define OPT_PAGE2048 0x00000008
267#define OPT_SMARTMEDIA 0x00000010
268#define OPT_PAGE512_8BIT 0x00000040
269#define OPT_PAGE4096 0x00000080
270#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096)
271#define OPT_SMALLPAGE (OPT_PAGE512)
272
273
274#define NS_STATE(x) ((x) & ~ACTION_MASK)
275
276
277
278
279
280
281#define NS_MAX_PREVSTATES 1
282
283
284#define NS_MAX_HELD_PAGES 16
285
286struct nandsim_debug_info {
287 struct dentry *dfs_root;
288 struct dentry *dfs_wear_report;
289};
290
291
292
293
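/* View of a page buffer as bytes or 16-bit words, depending on the bus width */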
294union ns_mem {
295 u_char *byte;
296 uint16_t *word;
297};
298
299
300
301
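/* Overall state of the simulated chip: geometry, state machine, registers and page storage */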
302struct nandsim {
303 struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
304 unsigned int nbparts;
305
306 uint busw;
307 u_char ids[4];
308 uint32_t options;
309 uint32_t state;
310 uint32_t nxstate;
311
312 uint32_t *op;
313 uint32_t pstates[NS_MAX_PREVSTATES];
314 uint16_t npstates;
315 uint16_t stateidx;
316
317
318 union ns_mem *pages;
319
320
321 struct kmem_cache *nand_pages_slab;
322
323
324 union ns_mem buf;
325
326
327 struct {
328 uint64_t totsz;
329 uint32_t secsz;
330 uint pgsz;
331 uint oobsz;
332 uint64_t totszoob;
333 uint pgszoob;
334 uint secszoob;
335 uint pgnum;
336 uint pgsec;
337 uint secshift;
338 uint pgshift;
339 uint pgaddrbytes;
340 uint secaddrbytes;
341 uint idbytes;
342 } geom;
343
344
345 struct {
346 unsigned command;
347 u_char status;
348 uint row;
349 uint column;
350 uint count;
351 uint num;
352 uint off;
353 } regs;
354
355
356 struct {
357 int ce;
358 int cle;
359 int ale;
360 int wp;
361 } lines;
362
363
364 struct file *cfile;
365 unsigned long *pages_written;
366 void *file_buf;
367 struct page *held_pages[NS_MAX_HELD_PAGES];
368 int held_cnt;
369
370 struct nandsim_debug_info dbg;
371};
372
373
374
375
376
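/*
 * Table of supported NAND operations. Each entry lists the chip options the
 * operation requires and the ordered sequence of states (with optional
 * actions) the state machine walks through to complete it.
 */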
377static struct nandsim_operations {
378 uint32_t reqopts;
379 uint32_t states[NS_OPER_STATES];
380} ops[NS_OPER_NUM] = {
381
382 {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
383 STATE_DATAOUT, STATE_READY}},
384
385 {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
386 STATE_DATAOUT, STATE_READY}},
387
388 {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
389 STATE_DATAOUT, STATE_READY}},
390
391 {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
392 STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
393
394 {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
395 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
396
397 {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
398 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
399
400 {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
401 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
402
403 {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
404
405 {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
406
407 {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
408
409 {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
410 STATE_DATAOUT, STATE_READY}},
411
412 {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
413 STATE_DATAOUT, STATE_READY}},
414};
415
416struct weak_block {
417 struct list_head list;
418 unsigned int erase_block_no;
419 unsigned int max_erases;
420 unsigned int erases_done;
421};
422
423static LIST_HEAD(weak_blocks);
424
425struct weak_page {
426 struct list_head list;
427 unsigned int page_no;
428 unsigned int max_writes;
429 unsigned int writes_done;
430};
431
432static LIST_HEAD(weak_pages);
433
434struct grave_page {
435 struct list_head list;
436 unsigned int page_no;
437 unsigned int max_reads;
438 unsigned int reads_done;
439};
440
441static LIST_HEAD(grave_pages);
442
443static unsigned long *erase_block_wear = NULL;
444static unsigned int wear_eb_count = 0;
445static unsigned long total_wear = 0;
446
447
448static struct mtd_info *nsmtd;
449
450static int nandsim_debugfs_show(struct seq_file *m, void *private)
451{
452 unsigned long wmin = -1, wmax = 0, avg;
453 unsigned long deciles[10], decile_max[10], tot = 0;
454 unsigned int i;
455
456
457 for (i = 0; i < wear_eb_count; ++i) {
458 unsigned long wear = erase_block_wear[i];
459 if (wear < wmin)
460 wmin = wear;
461 if (wear > wmax)
462 wmax = wear;
463 tot += wear;
464 }
465
466 for (i = 0; i < 9; ++i) {
467 deciles[i] = 0;
468 decile_max[i] = (wmax * (i + 1) + 5) / 10;
469 }
470 deciles[9] = 0;
471 decile_max[9] = wmax;
472 for (i = 0; i < wear_eb_count; ++i) {
473 int d;
474 unsigned long wear = erase_block_wear[i];
475 for (d = 0; d < 10; ++d)
476 if (wear <= decile_max[d]) {
477 deciles[d] += 1;
478 break;
479 }
480 }
481 avg = tot / wear_eb_count;
482
483
484 seq_printf(m, "Total numbers of erases: %lu\n", tot);
485 seq_printf(m, "Number of erase blocks: %u\n", wear_eb_count);
486 seq_printf(m, "Average number of erases: %lu\n", avg);
487 seq_printf(m, "Maximum number of erases: %lu\n", wmax);
488 seq_printf(m, "Minimum number of erases: %lu\n", wmin);
489 for (i = 0; i < 10; ++i) {
490 unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
491 if (from > decile_max[i])
492 continue;
493 seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
494 from,
495 decile_max[i],
496 deciles[i]);
497 }
498
499 return 0;
500}
501
502static int nandsim_debugfs_open(struct inode *inode, struct file *file)
503{
504 return single_open(file, nandsim_debugfs_show, inode->i_private);
505}
506
507static const struct file_operations dfs_fops = {
508 .open = nandsim_debugfs_open,
509 .read = seq_read,
510 .llseek = seq_lseek,
511 .release = single_release,
512};
513
514
515
516
517
518
519
520
521static int nandsim_debugfs_create(struct nandsim *dev)
522{
523 struct nandsim_debug_info *dbg = &dev->dbg;
524 struct dentry *dent;
525 int err;
526
527 if (!IS_ENABLED(CONFIG_DEBUG_FS))
528 return 0;
529
530 dent = debugfs_create_dir("nandsim", NULL);
531 if (IS_ERR_OR_NULL(dent)) {
		int err = dent ? PTR_ERR(dent) : -ENODEV;
533
534 NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
535 err);
536 return err;
537 }
538 dbg->dfs_root = dent;
539
540 dent = debugfs_create_file("wear_report", S_IRUSR,
541 dbg->dfs_root, dev, &dfs_fops);
542 if (IS_ERR_OR_NULL(dent))
543 goto out_remove;
544 dbg->dfs_wear_report = dent;
545
546 return 0;
547
548out_remove:
549 debugfs_remove_recursive(dbg->dfs_root);
550 err = dent ? PTR_ERR(dent) : -ENODEV;
551 return err;
552}
553
554
555
556
557static void nandsim_debugfs_remove(struct nandsim *ns)
558{
559 if (IS_ENABLED(CONFIG_DEBUG_FS))
560 debugfs_remove_recursive(ns->dbg.dfs_root);
561}
562
563
564
565
566
567
568
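/*
 * Allocate backing storage for the simulated flash: either the cache file
 * plus a pages-written bitmap, or an in-RAM page array backed by a
 * kmem_cache (page buffers are allocated lazily on first program).
 */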
569static int alloc_device(struct nandsim *ns)
570{
571 struct file *cfile;
572 int i, err;
573
574 if (cache_file) {
575 cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
576 if (IS_ERR(cfile))
577 return PTR_ERR(cfile);
578 if (!(cfile->f_mode & FMODE_CAN_READ)) {
579 NS_ERR("alloc_device: cache file not readable\n");
580 err = -EINVAL;
581 goto err_close;
582 }
583 if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
584 NS_ERR("alloc_device: cache file not writeable\n");
585 err = -EINVAL;
586 goto err_close;
587 }
588 ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) *
589 sizeof(unsigned long));
590 if (!ns->pages_written) {
591 NS_ERR("alloc_device: unable to allocate pages written array\n");
592 err = -ENOMEM;
593 goto err_close;
594 }
595 ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
596 if (!ns->file_buf) {
597 NS_ERR("alloc_device: unable to allocate file buf\n");
598 err = -ENOMEM;
599 goto err_free;
600 }
601 ns->cfile = cfile;
602 return 0;
603 }
604
605 ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
606 if (!ns->pages) {
607 NS_ERR("alloc_device: unable to allocate page array\n");
608 return -ENOMEM;
609 }
610 for (i = 0; i < ns->geom.pgnum; i++) {
611 ns->pages[i].byte = NULL;
612 }
613 ns->nand_pages_slab = kmem_cache_create("nandsim",
614 ns->geom.pgszoob, 0, 0, NULL);
615 if (!ns->nand_pages_slab) {
616 NS_ERR("cache_create: unable to create kmem_cache\n");
617 return -ENOMEM;
618 }
619
620 return 0;
621
622err_free:
623 vfree(ns->pages_written);
624err_close:
625 filp_close(cfile, NULL);
626 return err;
627}
628
629
630
631
632static void free_device(struct nandsim *ns)
633{
634 int i;
635
636 if (ns->cfile) {
637 kfree(ns->file_buf);
638 vfree(ns->pages_written);
639 filp_close(ns->cfile, NULL);
640 return;
641 }
642
643 if (ns->pages) {
644 for (i = 0; i < ns->geom.pgnum; i++) {
645 if (ns->pages[i].byte)
646 kmem_cache_free(ns->nand_pages_slab,
647 ns->pages[i].byte);
648 }
649 kmem_cache_destroy(ns->nand_pages_slab);
650 vfree(ns->pages);
651 }
652}
653
654static char *get_partition_name(int i)
655{
656 return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
657}
658
659
660
661
662
663
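/*
 * Derive the simulated geometry from the MTD/NAND information obtained by
 * nand_scan_ident(), set up the partition table and allocate the internal
 * page buffer.
 */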
664static int init_nandsim(struct mtd_info *mtd)
665{
666 struct nand_chip *chip = mtd->priv;
667 struct nandsim *ns = chip->priv;
668 int i, ret = 0;
669 uint64_t remains;
670 uint64_t next_offset;
671
672 if (NS_IS_INITIALIZED(ns)) {
673 NS_ERR("init_nandsim: nandsim is already initialized\n");
674 return -EIO;
675 }
676
677
678 chip->chip_delay = 0;
679
680
681 ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
682 ns->geom.totsz = mtd->size;
683 ns->geom.pgsz = mtd->writesize;
684 ns->geom.oobsz = mtd->oobsize;
685 ns->geom.secsz = mtd->erasesize;
686 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
687 ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
688 ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
689 ns->geom.secshift = ffs(ns->geom.secsz) - 1;
690 ns->geom.pgshift = chip->page_shift;
691 ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
692 ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
693 ns->options = 0;
694
695 if (ns->geom.pgsz == 512) {
696 ns->options |= OPT_PAGE512;
697 if (ns->busw == 8)
698 ns->options |= OPT_PAGE512_8BIT;
699 } else if (ns->geom.pgsz == 2048) {
700 ns->options |= OPT_PAGE2048;
701 } else if (ns->geom.pgsz == 4096) {
702 ns->options |= OPT_PAGE4096;
703 } else {
704 NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
705 return -EIO;
706 }
707
708 if (ns->options & OPT_SMALLPAGE) {
709 if (ns->geom.totsz <= (32 << 20)) {
710 ns->geom.pgaddrbytes = 3;
711 ns->geom.secaddrbytes = 2;
712 } else {
713 ns->geom.pgaddrbytes = 4;
714 ns->geom.secaddrbytes = 3;
715 }
716 } else {
717 if (ns->geom.totsz <= (128 << 20)) {
718 ns->geom.pgaddrbytes = 4;
719 ns->geom.secaddrbytes = 2;
720 } else {
721 ns->geom.pgaddrbytes = 5;
722 ns->geom.secaddrbytes = 3;
723 }
724 }
725
726
727 if (parts_num > ARRAY_SIZE(ns->partitions)) {
728 NS_ERR("too many partitions.\n");
729 ret = -EINVAL;
730 goto error;
731 }
732 remains = ns->geom.totsz;
733 next_offset = 0;
734 for (i = 0; i < parts_num; ++i) {
735 uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
736
737 if (!part_sz || part_sz > remains) {
738 NS_ERR("bad partition size.\n");
739 ret = -EINVAL;
740 goto error;
741 }
742 ns->partitions[i].name = get_partition_name(i);
743 ns->partitions[i].offset = next_offset;
744 ns->partitions[i].size = part_sz;
745 next_offset += ns->partitions[i].size;
746 remains -= ns->partitions[i].size;
747 }
748 ns->nbparts = parts_num;
749 if (remains) {
750 if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
751 NS_ERR("too many partitions.\n");
752 ret = -EINVAL;
753 goto error;
754 }
755 ns->partitions[i].name = get_partition_name(i);
756 ns->partitions[i].offset = next_offset;
757 ns->partitions[i].size = remains;
758 ns->nbparts += 1;
759 }
760
761 if (ns->busw == 16)
		NS_WARN("16-bit flash support hasn't been tested\n");
763
	NS_INFO("flash size: %llu MiB\n",
			(unsigned long long)ns->geom.totsz >> 20);
	NS_INFO("page size: %u bytes\n", ns->geom.pgsz);
	NS_INFO("OOB area size: %u bytes\n", ns->geom.oobsz);
	NS_INFO("sector size: %u KiB\n", ns->geom.secsz >> 10);
	NS_INFO("pages number: %u\n", ns->geom.pgnum);
	NS_INFO("pages per sector: %u\n", ns->geom.pgsec);
	NS_INFO("bus width: %u\n", ns->busw);
	NS_INFO("bits in sector size: %u\n", ns->geom.secshift);
	NS_INFO("bits in page size: %u\n", ns->geom.pgshift);
	NS_INFO("bits in OOB size: %u\n", ffs(ns->geom.oobsz) - 1);
	NS_INFO("flash size with OOB: %llu KiB\n",
			(unsigned long long)ns->geom.totszoob >> 10);
	NS_INFO("page address bytes: %u\n", ns->geom.pgaddrbytes);
	NS_INFO("sector address bytes: %u\n", ns->geom.secaddrbytes);
	NS_INFO("options: %#x\n", ns->options);
780
781 if ((ret = alloc_device(ns)) != 0)
782 goto error;
783
784
785 ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
786 if (!ns->buf.byte) {
787 NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
788 ns->geom.pgszoob);
789 ret = -ENOMEM;
790 goto error;
791 }
792 memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
793
794 return 0;
795
796error:
797 free_device(ns);
798
799 return ret;
800}
801
802
803
804
805static void free_nandsim(struct nandsim *ns)
806{
807 kfree(ns->buf.byte);
808 free_device(ns);
809
810 return;
811}
812
813static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
814{
815 char *w;
816 int zero_ok;
817 unsigned int erase_block_no;
818 loff_t offset;
819
820 if (!badblocks)
821 return 0;
822 w = badblocks;
823 do {
824 zero_ok = (*w == '0' ? 1 : 0);
825 erase_block_no = simple_strtoul(w, &w, 0);
826 if (!zero_ok && !erase_block_no) {
827 NS_ERR("invalid badblocks.\n");
828 return -EINVAL;
829 }
		offset = (loff_t)erase_block_no * ns->geom.secsz;
831 if (mtd_block_markbad(mtd, offset)) {
832 NS_ERR("invalid badblocks.\n");
833 return -EINVAL;
834 }
835 if (*w == ',')
836 w += 1;
837 } while (*w);
838 return 0;
839}
840
841static int parse_weakblocks(void)
842{
843 char *w;
844 int zero_ok;
845 unsigned int erase_block_no;
846 unsigned int max_erases;
847 struct weak_block *wb;
848
849 if (!weakblocks)
850 return 0;
851 w = weakblocks;
852 do {
853 zero_ok = (*w == '0' ? 1 : 0);
854 erase_block_no = simple_strtoul(w, &w, 0);
855 if (!zero_ok && !erase_block_no) {
856 NS_ERR("invalid weakblocks.\n");
857 return -EINVAL;
858 }
859 max_erases = 3;
860 if (*w == ':') {
861 w += 1;
862 max_erases = simple_strtoul(w, &w, 0);
863 }
864 if (*w == ',')
865 w += 1;
866 wb = kzalloc(sizeof(*wb), GFP_KERNEL);
867 if (!wb) {
868 NS_ERR("unable to allocate memory.\n");
869 return -ENOMEM;
870 }
871 wb->erase_block_no = erase_block_no;
872 wb->max_erases = max_erases;
873 list_add(&wb->list, &weak_blocks);
874 } while (*w);
875 return 0;
876}
877
878static int erase_error(unsigned int erase_block_no)
879{
880 struct weak_block *wb;
881
882 list_for_each_entry(wb, &weak_blocks, list)
883 if (wb->erase_block_no == erase_block_no) {
884 if (wb->erases_done >= wb->max_erases)
885 return 1;
886 wb->erases_done += 1;
887 return 0;
888 }
889 return 0;
890}
891
892static int parse_weakpages(void)
893{
894 char *w;
895 int zero_ok;
896 unsigned int page_no;
897 unsigned int max_writes;
898 struct weak_page *wp;
899
900 if (!weakpages)
901 return 0;
902 w = weakpages;
903 do {
904 zero_ok = (*w == '0' ? 1 : 0);
905 page_no = simple_strtoul(w, &w, 0);
906 if (!zero_ok && !page_no) {
			NS_ERR("invalid weakpages.\n");
908 return -EINVAL;
909 }
910 max_writes = 3;
911 if (*w == ':') {
912 w += 1;
913 max_writes = simple_strtoul(w, &w, 0);
914 }
915 if (*w == ',')
916 w += 1;
917 wp = kzalloc(sizeof(*wp), GFP_KERNEL);
918 if (!wp) {
919 NS_ERR("unable to allocate memory.\n");
920 return -ENOMEM;
921 }
922 wp->page_no = page_no;
923 wp->max_writes = max_writes;
924 list_add(&wp->list, &weak_pages);
925 } while (*w);
926 return 0;
927}
928
929static int write_error(unsigned int page_no)
930{
931 struct weak_page *wp;
932
933 list_for_each_entry(wp, &weak_pages, list)
934 if (wp->page_no == page_no) {
935 if (wp->writes_done >= wp->max_writes)
936 return 1;
937 wp->writes_done += 1;
938 return 0;
939 }
940 return 0;
941}
942
943static int parse_gravepages(void)
944{
945 char *g;
946 int zero_ok;
947 unsigned int page_no;
948 unsigned int max_reads;
949 struct grave_page *gp;
950
951 if (!gravepages)
952 return 0;
953 g = gravepages;
954 do {
955 zero_ok = (*g == '0' ? 1 : 0);
956 page_no = simple_strtoul(g, &g, 0);
957 if (!zero_ok && !page_no) {
			NS_ERR("invalid gravepages.\n");
959 return -EINVAL;
960 }
961 max_reads = 3;
962 if (*g == ':') {
963 g += 1;
964 max_reads = simple_strtoul(g, &g, 0);
965 }
966 if (*g == ',')
967 g += 1;
968 gp = kzalloc(sizeof(*gp), GFP_KERNEL);
969 if (!gp) {
970 NS_ERR("unable to allocate memory.\n");
971 return -ENOMEM;
972 }
973 gp->page_no = page_no;
974 gp->max_reads = max_reads;
975 list_add(&gp->list, &grave_pages);
976 } while (*g);
977 return 0;
978}
979
980static int read_error(unsigned int page_no)
981{
982 struct grave_page *gp;
983
984 list_for_each_entry(gp, &grave_pages, list)
985 if (gp->page_no == page_no) {
986 if (gp->reads_done >= gp->max_reads)
987 return 1;
988 gp->reads_done += 1;
989 return 0;
990 }
991 return 0;
992}
993
994static void free_lists(void)
995{
996 struct list_head *pos, *n;
997 list_for_each_safe(pos, n, &weak_blocks) {
998 list_del(pos);
999 kfree(list_entry(pos, struct weak_block, list));
1000 }
1001 list_for_each_safe(pos, n, &weak_pages) {
1002 list_del(pos);
1003 kfree(list_entry(pos, struct weak_page, list));
1004 }
1005 list_for_each_safe(pos, n, &grave_pages) {
1006 list_del(pos);
1007 kfree(list_entry(pos, struct grave_page, list));
1008 }
1009 kfree(erase_block_wear);
1010}
1011
1012static int setup_wear_reporting(struct mtd_info *mtd)
1013{
1014 size_t mem;
1015
1016 wear_eb_count = div_u64(mtd->size, mtd->erasesize);
1017 mem = wear_eb_count * sizeof(unsigned long);
1018 if (mem / sizeof(unsigned long) != wear_eb_count) {
1019 NS_ERR("Too many erase blocks for wear reporting\n");
1020 return -ENOMEM;
1021 }
1022 erase_block_wear = kzalloc(mem, GFP_KERNEL);
1023 if (!erase_block_wear) {
1024 NS_ERR("Too many erase blocks for wear reporting\n");
1025 return -ENOMEM;
1026 }
1027 return 0;
1028}
1029
1030static void update_wear(unsigned int erase_block_no)
1031{
1032 if (!erase_block_wear)
1033 return;
1034 total_wear += 1;
1035
1036
1037
1038
1039 if (total_wear == 0)
1040 NS_ERR("Erase counter total overflow\n");
1041 erase_block_wear[erase_block_no] += 1;
1042 if (erase_block_wear[erase_block_no] == 0)
1043 NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
1044}
1045
1046
1047
1048
1049static char *get_state_name(uint32_t state)
1050{
1051 switch (NS_STATE(state)) {
1052 case STATE_CMD_READ0:
1053 return "STATE_CMD_READ0";
1054 case STATE_CMD_READ1:
1055 return "STATE_CMD_READ1";
1056 case STATE_CMD_PAGEPROG:
1057 return "STATE_CMD_PAGEPROG";
1058 case STATE_CMD_READOOB:
1059 return "STATE_CMD_READOOB";
1060 case STATE_CMD_READSTART:
1061 return "STATE_CMD_READSTART";
1062 case STATE_CMD_ERASE1:
1063 return "STATE_CMD_ERASE1";
1064 case STATE_CMD_STATUS:
1065 return "STATE_CMD_STATUS";
1066 case STATE_CMD_SEQIN:
1067 return "STATE_CMD_SEQIN";
1068 case STATE_CMD_READID:
1069 return "STATE_CMD_READID";
1070 case STATE_CMD_ERASE2:
1071 return "STATE_CMD_ERASE2";
1072 case STATE_CMD_RESET:
1073 return "STATE_CMD_RESET";
1074 case STATE_CMD_RNDOUT:
1075 return "STATE_CMD_RNDOUT";
1076 case STATE_CMD_RNDOUTSTART:
1077 return "STATE_CMD_RNDOUTSTART";
1078 case STATE_ADDR_PAGE:
1079 return "STATE_ADDR_PAGE";
1080 case STATE_ADDR_SEC:
1081 return "STATE_ADDR_SEC";
1082 case STATE_ADDR_ZERO:
1083 return "STATE_ADDR_ZERO";
1084 case STATE_ADDR_COLUMN:
1085 return "STATE_ADDR_COLUMN";
1086 case STATE_DATAIN:
1087 return "STATE_DATAIN";
1088 case STATE_DATAOUT:
1089 return "STATE_DATAOUT";
1090 case STATE_DATAOUT_ID:
1091 return "STATE_DATAOUT_ID";
1092 case STATE_DATAOUT_STATUS:
1093 return "STATE_DATAOUT_STATUS";
1094 case STATE_DATAOUT_STATUS_M:
1095 return "STATE_DATAOUT_STATUS_M";
1096 case STATE_READY:
1097 return "STATE_READY";
1098 case STATE_UNKNOWN:
1099 return "STATE_UNKNOWN";
1100 }
1101
1102 NS_ERR("get_state_name: unknown state, BUG\n");
1103 return NULL;
1104}
1105
1106
1107
1108
1109
1110
1111static int check_command(int cmd)
1112{
1113 switch (cmd) {
1114
1115 case NAND_CMD_READ0:
1116 case NAND_CMD_READ1:
1117 case NAND_CMD_READSTART:
1118 case NAND_CMD_PAGEPROG:
1119 case NAND_CMD_READOOB:
1120 case NAND_CMD_ERASE1:
1121 case NAND_CMD_STATUS:
1122 case NAND_CMD_SEQIN:
1123 case NAND_CMD_READID:
1124 case NAND_CMD_ERASE2:
1125 case NAND_CMD_RESET:
1126 case NAND_CMD_RNDOUT:
1127 case NAND_CMD_RNDOUTSTART:
1128 return 0;
1129
1130 default:
1131 return 1;
1132 }
1133}
1134
1135
1136
1137
1138static uint32_t get_state_by_command(unsigned command)
1139{
1140 switch (command) {
1141 case NAND_CMD_READ0:
1142 return STATE_CMD_READ0;
1143 case NAND_CMD_READ1:
1144 return STATE_CMD_READ1;
1145 case NAND_CMD_PAGEPROG:
1146 return STATE_CMD_PAGEPROG;
1147 case NAND_CMD_READSTART:
1148 return STATE_CMD_READSTART;
1149 case NAND_CMD_READOOB:
1150 return STATE_CMD_READOOB;
1151 case NAND_CMD_ERASE1:
1152 return STATE_CMD_ERASE1;
1153 case NAND_CMD_STATUS:
1154 return STATE_CMD_STATUS;
1155 case NAND_CMD_SEQIN:
1156 return STATE_CMD_SEQIN;
1157 case NAND_CMD_READID:
1158 return STATE_CMD_READID;
1159 case NAND_CMD_ERASE2:
1160 return STATE_CMD_ERASE2;
1161 case NAND_CMD_RESET:
1162 return STATE_CMD_RESET;
1163 case NAND_CMD_RNDOUT:
1164 return STATE_CMD_RNDOUT;
1165 case NAND_CMD_RNDOUTSTART:
1166 return STATE_CMD_RNDOUTSTART;
1167 }
1168
1169 NS_ERR("get_state_by_command: unknown command, BUG\n");
1170 return 0;
1171}
1172
1173
1174
1175
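/* Accumulate one address byte into the column (low bytes) or row (high bytes) register */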
1176static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
1177{
1178 uint byte = (uint)bt;
1179
1180 if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
1181 ns->regs.column |= (byte << 8 * ns->regs.count);
1182 else {
1183 ns->regs.row |= (byte << 8 * (ns->regs.count -
1184 ns->geom.pgaddrbytes +
1185 ns->geom.secaddrbytes));
1186 }
1187
1188 return;
1189}
1190
1191
1192
1193
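/* Abort/finish the current operation and return the state machine to STATE_READY */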
1194static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
1195{
1196 NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
1197
1198 ns->state = STATE_READY;
1199 ns->nxstate = STATE_UNKNOWN;
1200 ns->op = NULL;
1201 ns->npstates = 0;
1202 ns->stateidx = 0;
1203 ns->regs.num = 0;
1204 ns->regs.count = 0;
1205 ns->regs.off = 0;
1206 ns->regs.row = 0;
1207 ns->regs.column = 0;
1208 ns->regs.status = status;
1209}
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
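/*
 * Look up the operation matching the states seen so far. Returns 0 if exactly
 * one operation matches (the state machine is switched to it), -1 if several
 * operations are still possible (the current state is recorded and more input
 * is needed), and -2 if nothing matches (the chip is switched to STATE_READY
 * with a failed status).
 */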
1253static int find_operation(struct nandsim *ns, uint32_t flag)
1254{
1255 int opsfound = 0;
1256 int i, j, idx = 0;
1257
1258 for (i = 0; i < NS_OPER_NUM; i++) {
1259
1260 int found = 1;
1261
1262 if (!(ns->options & ops[i].reqopts))
1263
1264 continue;
1265
1266 if (flag) {
1267 if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
1268 continue;
1269 } else {
1270 if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
1271 continue;
1272 }
1273
1274 for (j = 0; j < ns->npstates; j++)
1275 if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
1276 && (ns->options & ops[idx].reqopts)) {
1277 found = 0;
1278 break;
1279 }
1280
1281 if (found) {
1282 idx = i;
1283 opsfound += 1;
1284 }
1285 }
1286
1287 if (opsfound == 1) {
1288
1289 ns->op = &ops[idx].states[0];
1290 if (flag) {
1291
1292
1293
1294
1295
1296
1297
1298 ns->stateidx = ns->npstates - 1;
1299 } else {
1300 ns->stateidx = ns->npstates;
1301 }
1302 ns->npstates = 0;
1303 ns->state = ns->op[ns->stateidx];
1304 ns->nxstate = ns->op[ns->stateidx + 1];
1305 NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
1306 idx, get_state_name(ns->state), get_state_name(ns->nxstate));
1307 return 0;
1308 }
1309
1310 if (opsfound == 0) {
1311
1312 if (ns->npstates != 0) {
1313 NS_DBG("find_operation: no operation found, try again with state %s\n",
1314 get_state_name(ns->state));
1315 ns->npstates = 0;
1316 return find_operation(ns, 0);
1317
1318 }
1319 NS_DBG("find_operation: no operations found\n");
1320 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1321 return -2;
1322 }
1323
1324 if (flag) {
1325
1326 NS_DBG("find_operation: BUG, operation must be known if address is input\n");
1327 return -2;
1328 }
1329
1330 NS_DBG("find_operation: there is still ambiguity\n");
1331
1332 ns->pstates[ns->npstates++] = ns->state;
1333
1334 return -1;
1335}
1336
1337static void put_pages(struct nandsim *ns)
1338{
1339 int i;
1340
1341 for (i = 0; i < ns->held_cnt; i++)
1342 page_cache_release(ns->held_pages[i]);
1343}
1344
1345
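/*
 * Pin the cache-file pages covering the accessed range so that the following
 * kernel_read()/kernel_write() does not have to allocate page-cache pages
 * itself (the simulator may be called from memory-reclaim context).
 */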
1346static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
1347{
1348 pgoff_t index, start_index, end_index;
1349 struct page *page;
1350 struct address_space *mapping = file->f_mapping;
1351
1352 start_index = pos >> PAGE_CACHE_SHIFT;
1353 end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
1354 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
1355 return -EINVAL;
1356 ns->held_cnt = 0;
1357 for (index = start_index; index <= end_index; index++) {
1358 page = find_get_page(mapping, index);
1359 if (page == NULL) {
1360 page = find_or_create_page(mapping, index, GFP_NOFS);
1361 if (page == NULL) {
1362 write_inode_now(mapping->host, 1);
1363 page = find_or_create_page(mapping, index, GFP_NOFS);
1364 }
1365 if (page == NULL) {
1366 put_pages(ns);
1367 return -ENOMEM;
1368 }
1369 unlock_page(page);
1370 }
1371 ns->held_pages[ns->held_cnt++] = page;
1372 }
1373 return 0;
1374}
1375
1376static int set_memalloc(void)
1377{
1378 if (current->flags & PF_MEMALLOC)
1379 return 0;
1380 current->flags |= PF_MEMALLOC;
1381 return 1;
1382}
1383
1384static void clear_memalloc(int memalloc)
1385{
1386 if (memalloc)
1387 current->flags &= ~PF_MEMALLOC;
1388}
1389
1390static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
1391{
1392 ssize_t tx;
1393 int err, memalloc;
1394
1395 err = get_pages(ns, file, count, pos);
1396 if (err)
1397 return err;
1398 memalloc = set_memalloc();
1399 tx = kernel_read(file, pos, buf, count);
1400 clear_memalloc(memalloc);
1401 put_pages(ns);
1402 return tx;
1403}
1404
1405static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
1406{
1407 ssize_t tx;
1408 int err, memalloc;
1409
1410 err = get_pages(ns, file, count, pos);
1411 if (err)
1412 return err;
1413 memalloc = set_memalloc();
1414 tx = kernel_write(file, buf, count, pos);
1415 clear_memalloc(memalloc);
1416 put_pages(ns);
1417 return tx;
1418}
1419
1420
1421
1422
1423static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
1424{
1425 return &(ns->pages[ns->regs.row]);
1426}
1427
1428
1429
1430
1431static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
1432{
1433 return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
1434}
1435
1436static int do_read_error(struct nandsim *ns, int num)
1437{
1438 unsigned int page_no = ns->regs.row;
1439
1440 if (read_error(page_no)) {
1441 prandom_bytes(ns->buf.byte, num);
1442 NS_WARN("simulating read error in page %u\n", page_no);
1443 return 1;
1444 }
1445 return 0;
1446}
1447
1448static void do_bit_flips(struct nandsim *ns, int num)
1449{
1450 if (bitflips && prandom_u32() < (1 << 22)) {
1451 int flips = 1;
1452 if (bitflips > 1)
1453 flips = (prandom_u32() % (int) bitflips) + 1;
1454 while (flips--) {
1455 int pos = prandom_u32() % (num * 8);
1456 ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
1457 NS_WARN("read_page: flipping bit %d in page %d "
1458 "reading from %d ecc: corrected=%u failed=%u\n",
1459 pos, ns->regs.row, ns->regs.column + ns->regs.off,
1460 nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
1461 }
1462 }
1463}
1464
1465
1466
1467
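/*
 * Copy page data into the internal buffer; unwritten pages read back as 0xFF.
 * Simulated read errors and random bit flips are injected here.
 */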
1468static void read_page(struct nandsim *ns, int num)
1469{
1470 union ns_mem *mypage;
1471
1472 if (ns->cfile) {
1473 if (!test_bit(ns->regs.row, ns->pages_written)) {
1474 NS_DBG("read_page: page %d not written\n", ns->regs.row);
1475 memset(ns->buf.byte, 0xFF, num);
1476 } else {
1477 loff_t pos;
1478 ssize_t tx;
1479
1480 NS_DBG("read_page: page %d written, reading from %d\n",
1481 ns->regs.row, ns->regs.column + ns->regs.off);
1482 if (do_read_error(ns, num))
1483 return;
1484 pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
1485 tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
1486 if (tx != num) {
1487 NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1488 return;
1489 }
1490 do_bit_flips(ns, num);
1491 }
1492 return;
1493 }
1494
1495 mypage = NS_GET_PAGE(ns);
1496 if (mypage->byte == NULL) {
1497 NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
1498 memset(ns->buf.byte, 0xFF, num);
1499 } else {
1500 NS_DBG("read_page: page %d allocated, reading from %d\n",
1501 ns->regs.row, ns->regs.column + ns->regs.off);
1502 if (do_read_error(ns, num))
1503 return;
1504 memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
1505 do_bit_flips(ns, num);
1506 }
1507}
1508
1509
1510
1511
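/*
 * Erase the addressed erase block by releasing the backing storage of its
 * pages; erased pages subsequently read back as 0xFF.
 */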
1512static void erase_sector(struct nandsim *ns)
1513{
1514 union ns_mem *mypage;
1515 int i;
1516
1517 if (ns->cfile) {
1518 for (i = 0; i < ns->geom.pgsec; i++)
1519 if (__test_and_clear_bit(ns->regs.row + i,
1520 ns->pages_written)) {
1521 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
1522 }
1523 return;
1524 }
1525
1526 mypage = NS_GET_PAGE(ns);
1527 for (i = 0; i < ns->geom.pgsec; i++) {
1528 if (mypage->byte != NULL) {
1529 NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
1530 kmem_cache_free(ns->nand_pages_slab, mypage->byte);
1531 mypage->byte = NULL;
1532 }
1533 mypage++;
1534 }
1535}
1536
1537
1538
1539
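/*
 * Program the internal buffer into the addressed page. New data is AND-ed
 * into the existing contents, mimicking real NAND where programming can only
 * clear bits; backing storage is allocated on first write.
 */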
1540static int prog_page(struct nandsim *ns, int num)
1541{
1542 int i;
1543 union ns_mem *mypage;
1544 u_char *pg_off;
1545
1546 if (ns->cfile) {
1547 loff_t off;
1548 ssize_t tx;
1549 int all;
1550
1551 NS_DBG("prog_page: writing page %d\n", ns->regs.row);
1552 pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
1553 off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
1554 if (!test_bit(ns->regs.row, ns->pages_written)) {
1555 all = 1;
1556 memset(ns->file_buf, 0xff, ns->geom.pgszoob);
1557 } else {
1558 all = 0;
1559 tx = read_file(ns, ns->cfile, pg_off, num, off);
1560 if (tx != num) {
1561 NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
1562 return -1;
1563 }
1564 }
1565 for (i = 0; i < num; i++)
1566 pg_off[i] &= ns->buf.byte[i];
1567 if (all) {
1568 loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
1569 tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
1570 if (tx != ns->geom.pgszoob) {
1571 NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1572 return -1;
1573 }
1574 __set_bit(ns->regs.row, ns->pages_written);
1575 } else {
1576 tx = write_file(ns, ns->cfile, pg_off, num, off);
1577 if (tx != num) {
1578 NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
1579 return -1;
1580 }
1581 }
1582 return 0;
1583 }
1584
1585 mypage = NS_GET_PAGE(ns);
1586 if (mypage->byte == NULL) {
1587 NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
1588
1589
1590
1591
1592
1593
1594 mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
1595 if (mypage->byte == NULL) {
1596 NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
1597 return -1;
1598 }
1599 memset(mypage->byte, 0xFF, ns->geom.pgszoob);
1600 }
1601
1602 pg_off = NS_PAGE_BYTE_OFF(ns);
1603 for (i = 0; i < num; i++)
1604 pg_off[i] &= ns->buf.byte[i];
1605
1606 return 0;
1607}
1608
1609
1610
1611
1612
1613
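/*
 * Perform the action attached to the current state: copy a page into the
 * buffer (ACTION_CPY), program a page (ACTION_PRGPAGE), erase a block
 * (ACTION_SECERASE) or adjust the internal column offset (ACTION_ZEROOFF,
 * ACTION_HALFOFF, ACTION_OOBOFF). Returns -1 on simulated failure.
 */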
1614static int do_state_action(struct nandsim *ns, uint32_t action)
1615{
1616 int num;
1617 int busdiv = ns->busw == 8 ? 1 : 2;
1618 unsigned int erase_block_no, page_no;
1619
1620 action &= ACTION_MASK;
1621
1622
1623 if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
1624 NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
1625 return -1;
1626 }
1627
1628 switch (action) {
1629
1630 case ACTION_CPY:
1631
1632
1633
1634
1635
1636 if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
1637 NS_ERR("do_state_action: column number is too large\n");
1638 break;
1639 }
1640 num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1641 read_page(ns, num);
1642
1643 NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
1644 num, NS_RAW_OFFSET(ns) + ns->regs.off);
1645
1646 if (ns->regs.off == 0)
1647 NS_LOG("read page %d\n", ns->regs.row);
1648 else if (ns->regs.off < ns->geom.pgsz)
1649 NS_LOG("read page %d (second half)\n", ns->regs.row);
1650 else
1651 NS_LOG("read OOB of page %d\n", ns->regs.row);
1652
1653 NS_UDELAY(access_delay);
1654 NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
1655
1656 break;
1657
1658 case ACTION_SECERASE:
1659
1660
1661
1662
1663 if (ns->lines.wp) {
1664 NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
1665 return -1;
1666 }
1667
1668 if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
1669 || (ns->regs.row & ~(ns->geom.secsz - 1))) {
1670 NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
1671 return -1;
1672 }
1673
1674 ns->regs.row = (ns->regs.row <<
1675 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
1676 ns->regs.column = 0;
1677
1678 erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
1679
1680 NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
1681 ns->regs.row, NS_RAW_OFFSET(ns));
1682 NS_LOG("erase sector %u\n", erase_block_no);
1683
1684 erase_sector(ns);
1685
1686 NS_MDELAY(erase_delay);
1687
1688 if (erase_block_wear)
1689 update_wear(erase_block_no);
1690
1691 if (erase_error(erase_block_no)) {
1692 NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
1693 return -1;
1694 }
1695
1696 break;
1697
1698 case ACTION_PRGPAGE:
1699
1700
1701
1702
1703 if (ns->lines.wp) {
			NS_WARN("do_state_action: device is write-protected, ignore page program\n");
1705 return -1;
1706 }
1707
1708 num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1709 if (num != ns->regs.count) {
1710 NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
1711 ns->regs.count, num);
1712 return -1;
1713 }
1714
1715 if (prog_page(ns, num) == -1)
1716 return -1;
1717
1718 page_no = ns->regs.row;
1719
1720 NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
1721 num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
		NS_LOG("program page %d\n", ns->regs.row);
1723
1724 NS_UDELAY(programm_delay);
1725 NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
1726
1727 if (write_error(page_no)) {
1728 NS_WARN("simulating write failure in page %u\n", page_no);
1729 return -1;
1730 }
1731
1732 break;
1733
1734 case ACTION_ZEROOFF:
1735 NS_DBG("do_state_action: set internal offset to 0\n");
1736 ns->regs.off = 0;
1737 break;
1738
1739 case ACTION_HALFOFF:
1740 if (!(ns->options & OPT_PAGE512_8BIT)) {
			NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
				"-byte page size 8-bit chips\n");
1743 return -1;
1744 }
1745 NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
1746 ns->regs.off = ns->geom.pgsz/2;
1747 break;
1748
1749 case ACTION_OOBOFF:
1750 NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
1751 ns->regs.off = ns->geom.pgsz;
1752 break;
1753
1754 default:
1755 NS_DBG("do_state_action: BUG! unknown action\n");
1756 }
1757
1758 return 0;
1759}
1760
1761
1762
1763
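/*
 * Advance the state machine after a command, address or data phase: perform
 * the action attached to the new state and prepare the byte counters for the
 * next address or data I/O phase.
 */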
1764static void switch_state(struct nandsim *ns)
1765{
1766 if (ns->op) {
1767
1768
1769
1770
1771
1772 ns->stateidx += 1;
1773 ns->state = ns->nxstate;
1774 ns->nxstate = ns->op[ns->stateidx + 1];
1775
1776 NS_DBG("switch_state: operation is known, switch to the next state, "
1777 "state: %s, nxstate: %s\n",
1778 get_state_name(ns->state), get_state_name(ns->nxstate));
1779
1780
1781 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1782 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1783 return;
1784 }
1785
1786 } else {
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796 ns->state = get_state_by_command(ns->regs.command);
1797
1798 NS_DBG("switch_state: operation is unknown, try to find it\n");
1799
1800 if (find_operation(ns, 0) != 0)
1801 return;
1802
1803 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
1804 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
1805 return;
1806 }
1807 }
1808
1809
1810 if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
1811 NS_DBG("switch_state: double the column number for 16x device\n");
1812 ns->regs.column <<= 1;
1813 }
1814
1815 if (NS_STATE(ns->nxstate) == STATE_READY) {
1816
1817
1818
1819
1820 u_char status = NS_STATUS_OK(ns);
1821
1822
1823 if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
1824 && ns->regs.count != ns->regs.num) {
1825 NS_WARN("switch_state: not all bytes were processed, %d left\n",
1826 ns->regs.num - ns->regs.count);
1827 status = NS_STATUS_FAILED(ns);
1828 }
1829
1830 NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
1831
1832 switch_to_ready_state(ns, status);
1833
1834 return;
1835 } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
1836
1837
1838
1839
1840 ns->state = ns->nxstate;
1841 ns->nxstate = ns->op[++ns->stateidx + 1];
1842 ns->regs.num = ns->regs.count = 0;
1843
1844 NS_DBG("switch_state: the next state is data I/O, switch, "
1845 "state: %s, nxstate: %s\n",
1846 get_state_name(ns->state), get_state_name(ns->nxstate));
1847
1848
1849
1850
1851
1852 switch (NS_STATE(ns->state)) {
1853 case STATE_DATAIN:
1854 case STATE_DATAOUT:
1855 ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
1856 break;
1857
1858 case STATE_DATAOUT_ID:
1859 ns->regs.num = ns->geom.idbytes;
1860 break;
1861
1862 case STATE_DATAOUT_STATUS:
1863 case STATE_DATAOUT_STATUS_M:
1864 ns->regs.count = ns->regs.num = 0;
1865 break;
1866
1867 default:
1868 NS_ERR("switch_state: BUG! unknown data state\n");
1869 }
1870
1871 } else if (ns->nxstate & STATE_ADDR_MASK) {
1872
1873
1874
1875
1876
1877 ns->regs.count = 0;
1878
1879 switch (NS_STATE(ns->nxstate)) {
1880 case STATE_ADDR_PAGE:
1881 ns->regs.num = ns->geom.pgaddrbytes;
1882
1883 break;
1884 case STATE_ADDR_SEC:
1885 ns->regs.num = ns->geom.secaddrbytes;
1886 break;
1887
1888 case STATE_ADDR_ZERO:
1889 ns->regs.num = 1;
1890 break;
1891
1892 case STATE_ADDR_COLUMN:
1893
1894 ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
1895 break;
1896
1897 default:
1898 NS_ERR("switch_state: BUG! unknown address state\n");
1899 }
1900 } else {
1901
1902
1903
1904
1905 ns->regs.num = 0;
1906 ns->regs.count = 0;
1907 }
1908}
1909
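/* MTD read_byte handler: return the next byte of status, ID or page data */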
1910static u_char ns_nand_read_byte(struct mtd_info *mtd)
1911{
1912 struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
1913 u_char outb = 0x00;
1914
1915
1916 if (!ns->lines.ce) {
1917 NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
1918 return outb;
1919 }
1920 if (ns->lines.ale || ns->lines.cle) {
1921 NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
1922 return outb;
1923 }
1924 if (!(ns->state & STATE_DATAOUT_MASK)) {
1925 NS_WARN("read_byte: unexpected data output cycle, state is %s "
1926 "return %#x\n", get_state_name(ns->state), (uint)outb);
1927 return outb;
1928 }
1929
1930
1931 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
1932 NS_DBG("read_byte: return %#x status\n", ns->regs.status);
1933 return ns->regs.status;
1934 }
1935
1936
1937 if (ns->regs.count == ns->regs.num) {
1938 NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
1939 return outb;
1940 }
1941
1942 switch (NS_STATE(ns->state)) {
1943 case STATE_DATAOUT:
1944 if (ns->busw == 8) {
1945 outb = ns->buf.byte[ns->regs.count];
1946 ns->regs.count += 1;
1947 } else {
1948 outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
1949 ns->regs.count += 2;
1950 }
1951 break;
1952 case STATE_DATAOUT_ID:
1953 NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
1954 outb = ns->ids[ns->regs.count];
1955 ns->regs.count += 1;
1956 break;
1957 default:
1958 BUG();
1959 }
1960
1961 if (ns->regs.count == ns->regs.num) {
1962 NS_DBG("read_byte: all bytes were read\n");
1963
1964 if (NS_STATE(ns->nxstate) == STATE_READY)
1965 switch_state(ns);
1966 }
1967
1968 return outb;
1969}
1970
1971static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1972{
1973 struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
1974
1975
1976 if (!ns->lines.ce) {
1977 NS_ERR("write_byte: chip is disabled, ignore write\n");
1978 return;
1979 }
1980 if (ns->lines.ale && ns->lines.cle) {
1981 NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
1982 return;
1983 }
1984
1985 if (ns->lines.cle == 1) {
1986
1987
1988
1989
1990 if (byte == NAND_CMD_RESET) {
1991 NS_LOG("reset chip\n");
1992 switch_to_ready_state(ns, NS_STATUS_OK(ns));
1993 return;
1994 }
1995
1996
1997 if (check_command(byte)) {
1998 NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
1999 return;
2000 }
2001
2002 if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
2003 || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
2004 || NS_STATE(ns->state) == STATE_DATAOUT) {
2005 int row = ns->regs.row;
2006
2007 switch_state(ns);
2008 if (byte == NAND_CMD_RNDOUT)
2009 ns->regs.row = row;
2010 }
2011
2012
2013 if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
2014
2015 if (!(ns->regs.command == NAND_CMD_READID &&
2016 NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
2017
2018
2019
2020
2021
2022 NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
2023 "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
2024 }
2025 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2026 }
2027
2028 NS_DBG("command byte corresponding to %s state accepted\n",
2029 get_state_name(get_state_by_command(byte)));
2030 ns->regs.command = byte;
2031 switch_state(ns);
2032
2033 } else if (ns->lines.ale == 1) {
2034
2035
2036
2037
2038 if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
2039
2040 NS_DBG("write_byte: operation isn't known yet, identify it\n");
2041
2042 if (find_operation(ns, 1) < 0)
2043 return;
2044
2045 if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
2046 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2047 return;
2048 }
2049
2050 ns->regs.count = 0;
2051 switch (NS_STATE(ns->nxstate)) {
2052 case STATE_ADDR_PAGE:
2053 ns->regs.num = ns->geom.pgaddrbytes;
2054 break;
2055 case STATE_ADDR_SEC:
2056 ns->regs.num = ns->geom.secaddrbytes;
2057 break;
2058 case STATE_ADDR_ZERO:
2059 ns->regs.num = 1;
2060 break;
2061 default:
2062 BUG();
2063 }
2064 }
2065
2066
2067 if (!(ns->nxstate & STATE_ADDR_MASK)) {
2068 NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
2069 "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
2070 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2071 return;
2072 }
2073
2074
2075 if (ns->regs.count == ns->regs.num) {
2076 NS_ERR("write_byte: no more address bytes expected\n");
2077 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2078 return;
2079 }
2080
2081 accept_addr_byte(ns, byte);
2082
2083 ns->regs.count += 1;
2084
2085 NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
2086 (uint)byte, ns->regs.count, ns->regs.num);
2087
2088 if (ns->regs.count == ns->regs.num) {
2089 NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
2090 switch_state(ns);
2091 }
2092
2093 } else {
2094
2095
2096
2097
2098
2099 if (!(ns->state & STATE_DATAIN_MASK)) {
2100 NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
2101 "switch to %s\n", (uint)byte,
2102 get_state_name(ns->state), get_state_name(STATE_READY));
2103 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2104 return;
2105 }
2106
2107
2108 if (ns->regs.count == ns->regs.num) {
			NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
2110 ns->regs.num);
2111 return;
2112 }
2113
2114 if (ns->busw == 8) {
2115 ns->buf.byte[ns->regs.count] = byte;
2116 ns->regs.count += 1;
2117 } else {
2118 ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
2119 ns->regs.count += 2;
2120 }
2121 }
2122
2123 return;
2124}
2125
2126static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
2127{
2128 struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
2129
2130 ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
2131 ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
2132 ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
2133
2134 if (cmd != NAND_CMD_NONE)
2135 ns_nand_write_byte(mtd, cmd);
2136}
2137
2138static int ns_device_ready(struct mtd_info *mtd)
2139{
2140 NS_DBG("device_ready\n");
2141 return 1;
2142}
2143
2144static uint16_t ns_nand_read_word(struct mtd_info *mtd)
2145{
2146 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2147
2148 NS_DBG("read_word\n");
2149
2150 return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
2151}
2152
2153static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
2154{
2155 struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
2156
2157
2158 if (!(ns->state & STATE_DATAIN_MASK)) {
2159 NS_ERR("write_buf: data input isn't expected, state is %s, "
2160 "switch to STATE_READY\n", get_state_name(ns->state));
2161 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2162 return;
2163 }
2164
2165
2166 if (ns->regs.count + len > ns->regs.num) {
2167 NS_ERR("write_buf: too many input bytes\n");
2168 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2169 return;
2170 }
2171
2172 memcpy(ns->buf.byte + ns->regs.count, buf, len);
2173 ns->regs.count += len;
2174
2175 if (ns->regs.count == ns->regs.num) {
2176 NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
2177 }
2178}
2179
2180static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
2181{
2182 struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
2183
2184
2185 if (!ns->lines.ce) {
2186 NS_ERR("read_buf: chip is disabled\n");
2187 return;
2188 }
2189 if (ns->lines.ale || ns->lines.cle) {
2190 NS_ERR("read_buf: ALE or CLE pin is high\n");
2191 return;
2192 }
2193 if (!(ns->state & STATE_DATAOUT_MASK)) {
2194 NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
2195 get_state_name(ns->state));
2196 return;
2197 }
2198
2199 if (NS_STATE(ns->state) != STATE_DATAOUT) {
2200 int i;
2201
2202 for (i = 0; i < len; i++)
2203 buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
2204
2205 return;
2206 }
2207
2208
2209 if (ns->regs.count + len > ns->regs.num) {
2210 NS_ERR("read_buf: too many bytes to read\n");
2211 switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
2212 return;
2213 }
2214
2215 memcpy(buf, ns->buf.byte + ns->regs.count, len);
2216 ns->regs.count += len;
2217
2218 if (ns->regs.count == ns->regs.num) {
2219 if (NS_STATE(ns->nxstate) == STATE_READY)
2220 switch_state(ns);
2221 }
2222
2223 return;
2224}
2225
2226
2227
2228
2229static int __init ns_init_module(void)
2230{
2231 struct nand_chip *chip;
2232 struct nandsim *nand;
2233 int retval = -ENOMEM, i;
2234
2235 if (bus_width != 8 && bus_width != 16) {
2236 NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
2237 return -EINVAL;
2238 }
2239
2240
2241 nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
2242 + sizeof(struct nandsim), GFP_KERNEL);
2243 if (!nsmtd) {
2244 NS_ERR("unable to allocate core structures.\n");
2245 return -ENOMEM;
2246 }
2247 chip = (struct nand_chip *)(nsmtd + 1);
2248 nsmtd->priv = (void *)chip;
2249 nand = (struct nandsim *)(chip + 1);
2250 chip->priv = (void *)nand;
2251
2252
2253
2254
2255 chip->cmd_ctrl = ns_hwcontrol;
2256 chip->read_byte = ns_nand_read_byte;
2257 chip->dev_ready = ns_device_ready;
2258 chip->write_buf = ns_nand_write_buf;
2259 chip->read_buf = ns_nand_read_buf;
2260 chip->read_word = ns_nand_read_word;
2261 chip->ecc.mode = NAND_ECC_SOFT;
2262
2263
2264 chip->options |= NAND_SKIP_BBTSCAN;
2265
	switch (bbt) {
	case 2:
		chip->bbt_options |= NAND_BBT_NO_OOB;
		/* fall through */
	case 1:
		chip->bbt_options |= NAND_BBT_USE_FLASH;
		/* fall through */
	case 0:
		break;
2273 default:
2274 NS_ERR("bbt has to be 0..2\n");
2275 retval = -EINVAL;
2276 goto error;
2277 }
2278
2279
2280
2281
2282 if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
2283 nand->geom.idbytes = 4;
2284 else
2285 nand->geom.idbytes = 2;
2286 nand->regs.status = NS_STATUS_OK(nand);
2287 nand->nxstate = STATE_UNKNOWN;
2288 nand->options |= OPT_PAGE512;
2289 nand->ids[0] = first_id_byte;
2290 nand->ids[1] = second_id_byte;
2291 nand->ids[2] = third_id_byte;
2292 nand->ids[3] = fourth_id_byte;
2293 if (bus_width == 16) {
2294 nand->busw = 16;
2295 chip->options |= NAND_BUSWIDTH_16;
2296 }
2297
2298 nsmtd->owner = THIS_MODULE;
2299
2300 if ((retval = parse_weakblocks()) != 0)
2301 goto error;
2302
2303 if ((retval = parse_weakpages()) != 0)
2304 goto error;
2305
2306 if ((retval = parse_gravepages()) != 0)
2307 goto error;
2308
2309 retval = nand_scan_ident(nsmtd, 1, NULL);
2310 if (retval) {
2311 NS_ERR("cannot scan NAND Simulator device\n");
2312 if (retval > 0)
2313 retval = -ENXIO;
2314 goto error;
2315 }
2316
2317 if (bch) {
2318 unsigned int eccsteps, eccbytes;
2319 if (!mtd_nand_has_bch()) {
2320 NS_ERR("BCH ECC support is disabled\n");
2321 retval = -EINVAL;
2322 goto error;
2323 }
2324
2325 eccsteps = nsmtd->writesize/512;
2326 eccbytes = (bch*13+7)/8;
2327
2328 if ((nsmtd->oobsize < 64) || !eccsteps) {
2329 NS_ERR("bch not available on small page devices\n");
2330 retval = -EINVAL;
2331 goto error;
2332 }
2333 if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
2334 NS_ERR("invalid bch value %u\n", bch);
2335 retval = -EINVAL;
2336 goto error;
2337 }
2338 chip->ecc.mode = NAND_ECC_SOFT_BCH;
2339 chip->ecc.size = 512;
2340 chip->ecc.bytes = eccbytes;
		NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.bytes);
2342 }
2343
2344 retval = nand_scan_tail(nsmtd);
2345 if (retval) {
2346 NS_ERR("can't register NAND Simulator\n");
2347 if (retval > 0)
2348 retval = -ENXIO;
2349 goto error;
2350 }
2351
2352 if (overridesize) {
2353 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
2354 if (new_size >> overridesize != nsmtd->erasesize) {
2355 NS_ERR("overridesize is too big\n");
2356 retval = -EINVAL;
2357 goto err_exit;
2358 }
2359
2360 nsmtd->size = new_size;
2361 chip->chipsize = new_size;
2362 chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
2363 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
2364 }
2365
2366 if ((retval = setup_wear_reporting(nsmtd)) != 0)
2367 goto err_exit;
2368
2369 if ((retval = nandsim_debugfs_create(nand)) != 0)
2370 goto err_exit;
2371
2372 if ((retval = init_nandsim(nsmtd)) != 0)
2373 goto err_exit;
2374
2375 if ((retval = chip->scan_bbt(nsmtd)) != 0)
2376 goto err_exit;
2377
2378 if ((retval = parse_badblocks(nand, nsmtd)) != 0)
2379 goto err_exit;
2380
2381
2382 retval = mtd_device_register(nsmtd, &nand->partitions[0],
2383 nand->nbparts);
2384 if (retval != 0)
2385 goto err_exit;
2386
2387 return 0;
2388
2389err_exit:
2390 free_nandsim(nand);
2391 nand_release(nsmtd);
	for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
2393 kfree(nand->partitions[i].name);
2394error:
2395 kfree(nsmtd);
2396 free_lists();
2397
2398 return retval;
2399}
2400
2401module_init(ns_init_module);
2402
2403
2404
2405
2406static void __exit ns_cleanup_module(void)
2407{
2408 struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
2409 int i;
2410
2411 nandsim_debugfs_remove(ns);
2412 free_nandsim(ns);
2413 nand_release(nsmtd);
	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
2415 kfree(ns->partitions[i].name);
2416 kfree(nsmtd);
2417 free_lists();
2418}
2419
2420module_exit(ns_cleanup_module);
2421
2422MODULE_LICENSE ("GPL");
2423MODULE_AUTHOR ("Artem B. Bityuckiy");
2424MODULE_DESCRIPTION ("The NAND flash simulator");
2425