#include "libbb.h"
#include <mntent.h>

#include "minix.h"

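/*
 * Keep times, uid and gid pinned to 0 so that two runs over identical
 * devices produce byte-identical images.  Switch to the #else branch to
 * stamp the real creation time and invoking user instead.
 */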
#if 1
# define CUR_TIME 0
# define GETUID 0
# define GETGID 0
#else
# define CUR_TIME time(NULL)
# define GETUID getuid()
# define GETGID getgid()
#endif

enum {
	MAX_GOOD_BLOCKS = 512,
	TEST_BUFFER_BLOCKS = 16,
};

#if !ENABLE_FEATURE_MINIX2
enum { version2 = 0 };
#endif

enum { dev_fd = 3 };

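/*
 * All runtime state sits in one xzalloc'ed struct globals, reached through
 * the usual busybox ptr_to_globals/G scheme; INIT_G() must run first.
 * The target device itself is opened in main() and moved to the fixed
 * descriptor dev_fd, so none of the helpers below take an fd parameter.
 */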
struct globals {
#if ENABLE_FEATURE_MINIX2
	smallint version2;
#define version2 G.version2
#endif
	char *device_name;
	uint32_t total_blocks;
	int badblocks;
	int namelen;
	int dirsize;
	int magic;
	char *inode_buffer;
	char *inode_map;
	char *zone_map;
	int used_good_blocks;
	unsigned long req_nr_inodes;
	unsigned currently_testing;

	char root_block[BLOCK_SIZE];
	union {
		char superblock_buffer[BLOCK_SIZE];
		struct minix_superblock SB;
	} u;
	char boot_block_buffer[512];
	unsigned short good_blocks_table[MAX_GOOD_BLOCKS];

	char check_blocks_buffer[BLOCK_SIZE * TEST_BUFFER_BLOCKS];

	unsigned short ind_block1[BLOCK_SIZE >> 1];
	unsigned short dind_block1[BLOCK_SIZE >> 1];
	unsigned long ind_block2[BLOCK_SIZE >> 2];
	unsigned long dind_block2[BLOCK_SIZE >> 2];
};
#define G (*ptr_to_globals)
#define INIT_G() do { \
	SET_PTR_TO_GLOBALS(xzalloc(sizeof(G))); \
} while (0)

static ALWAYS_INLINE unsigned div_roundup(unsigned size, unsigned n)
{
	return (size + n-1) / n;
}

#define INODE_BUF1 (((struct minix1_inode*)G.inode_buffer) - 1)
#define INODE_BUF2 (((struct minix2_inode*)G.inode_buffer) - 1)

#define SB (G.u.SB)

#define SB_INODES    (SB.s_ninodes)
#define SB_IMAPS     (SB.s_imap_blocks)
#define SB_ZMAPS     (SB.s_zmap_blocks)
#define SB_FIRSTZONE (SB.s_firstdatazone)
#define SB_ZONE_SIZE (SB.s_log_zone_size)
#define SB_MAXSIZE   (SB.s_max_size)
#define SB_MAGIC     (SB.s_magic)

#if !ENABLE_FEATURE_MINIX2
# define SB_ZONES (SB.s_nzones)
# define INODE_BLOCKS div_roundup(SB_INODES, MINIX1_INODES_PER_BLOCK)
#else
# define SB_ZONES (version2 ? SB.s_zones : SB.s_nzones)
# define INODE_BLOCKS div_roundup(SB_INODES, \
	(version2 ? MINIX2_INODES_PER_BLOCK : MINIX1_INODES_PER_BLOCK))
#endif

#define INODE_BUFFER_SIZE (INODE_BLOCKS * BLOCK_SIZE)
#define NORM_FIRSTZONE (2 + SB_IMAPS + SB_ZMAPS + INODE_BLOCKS)

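/* Bitmap helpers: minix_bit() tests a bit in an in-memory map, while
 * minix_setbit()/minix_clrbit() wrap the setbit()/clrbit() macros that the
 * system headers pulled in by libbb.h provide. */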
static int minix_bit(const char* a, unsigned i)
{
	return a[i >> 3] & (1 << (i & 7));
}

static void minix_setbit(char *a, unsigned i)
{
	setbit(a, i);
}
static void minix_clrbit(char *a, unsigned i)
{
	clrbit(a, i);
}

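/*
 * Map indexing: zone z is tracked by bit (z - SB_FIRSTZONE + 1) of the zone
 * map, and inodes are indexed directly by inode number.  Bit 0 of each map
 * stays set (entry 0 is reserved), see setup_tables().
 */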
#define zone_in_use(x)  minix_bit(G.zone_map,(x)-SB_FIRSTZONE+1)

#define mark_inode(x)   minix_setbit(G.inode_map,(x))
#define unmark_inode(x) minix_clrbit(G.inode_map,(x))
#define mark_zone(x)    minix_setbit(G.zone_map,(x)-SB_FIRSTZONE+1)
#define unmark_zone(x)  minix_clrbit(G.zone_map,(x)-SB_FIRSTZONE+1)

#ifndef BLKGETSIZE
# define BLKGETSIZE _IO(0x12,96)
#endif

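/*
 * Flush the finished filesystem in on-disk order: 512 zeroed bytes over the
 * boot sector, the superblock at offset BLOCK_SIZE, then the inode map, the
 * zone map and the inode table.  msg_eol (libbb's error-message terminator)
 * is pointed at a step description before each call so that a fatal x-call
 * failure names the step, and is restored to "\n" at the end.
 */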
static void write_tables(void)
{
	SB.s_state |= MINIX_VALID_FS;
	SB.s_state &= ~MINIX_ERROR_FS;

	msg_eol = "seek to 0 failed";
	xlseek(dev_fd, 0, SEEK_SET);

	msg_eol = "can't clear boot sector";
	xwrite(dev_fd, G.boot_block_buffer, 512);

	msg_eol = "seek to BLOCK_SIZE failed";
	xlseek(dev_fd, BLOCK_SIZE, SEEK_SET);

	msg_eol = "can't write superblock";
	xwrite(dev_fd, G.u.superblock_buffer, BLOCK_SIZE);

	msg_eol = "can't write inode map";
	xwrite(dev_fd, G.inode_map, SB_IMAPS * BLOCK_SIZE);

	msg_eol = "can't write zone map";
	xwrite(dev_fd, G.zone_map, SB_ZMAPS * BLOCK_SIZE);

	msg_eol = "can't write inodes";
	xwrite(dev_fd, G.inode_buffer, INODE_BUFFER_SIZE);

	msg_eol = "\n";
}

static void write_block(int blk, char *buffer)
{
	xlseek(dev_fd, blk * BLOCK_SIZE, SEEK_SET);
	xwrite(dev_fd, buffer, BLOCK_SIZE);
}

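/*
 * Reserve a good zone to hold one of the bad-block file's index blocks:
 * scan forward from the last zone handed out (or from the first data zone)
 * for one not yet marked, and remember it so mark_good_blocks() can flag it
 * as allocated afterwards.
 */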
static int get_free_block(void)
{
	int blk;

	if (G.used_good_blocks + 1 >= MAX_GOOD_BLOCKS)
		bb_simple_error_msg_and_die("too many bad blocks");
	if (G.used_good_blocks)
		blk = G.good_blocks_table[G.used_good_blocks - 1] + 1;
	else
		blk = SB_FIRSTZONE;
	while (blk < SB_ZONES && zone_in_use(blk))
		blk++;
	if (blk >= SB_ZONES)
		bb_simple_error_msg_and_die("not enough good blocks");
	G.good_blocks_table[G.used_good_blocks] = blk;
	G.used_good_blocks++;
	return blk;
}

static void mark_good_blocks(void)
{
	int blk;

	for (blk = 0; blk < G.used_good_blocks; blk++)
		mark_zone(G.good_blocks_table[blk]);
}

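/*
 * Return the next zone after 'zone' that is marked in the zone map - at this
 * point that means the next recorded bad block - or 0 when there are none
 * left.  next(0) starts the scan at the first data zone.
 */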
static int next(int zone)
{
	if (!zone)
		zone = SB_FIRSTZONE - 1;
	while (++zone < SB_ZONES)
		if (zone_in_use(zone))
			return zone;
	return 0;
}

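/*
 * Build inode 2 (MINIX_BAD_INO) as a hidden file owning every bad zone so
 * the allocator can never hand them out: up to 7 zones go into the direct
 * slots, the next 512 into an indirect block (16-bit entries in v1), and
 * the rest into a double-indirect tree.  The index blocks themselves are
 * taken from get_free_block().
 */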
static void make_bad_inode(void)
{
	struct minix1_inode *inode = &INODE_BUF1[MINIX_BAD_INO];
	int i, j, zone;
	int ind = 0, dind = 0;

#define ind_block (G.ind_block1)
#define dind_block (G.dind_block1)

#define NEXT_BAD (zone = next(zone))

	if (!G.badblocks)
		return;
	mark_inode(MINIX_BAD_INO);
	inode->i_nlinks = 1;
	inode->i_time = CUR_TIME;
	inode->i_mode = S_IFREG + 0000;
	inode->i_size = G.badblocks * BLOCK_SIZE;
	zone = next(0);
	for (i = 0; i < 7; i++) {
		inode->i_zone[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[7] = ind = get_free_block();
	memset(ind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 512; i++) {
		ind_block[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[8] = dind = get_free_block();
	memset(dind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 512; i++) {
		write_block(ind, (char *) ind_block);
		dind_block[i] = ind = get_free_block();
		memset(ind_block, 0, BLOCK_SIZE);
		for (j = 0; j < 512; j++) {
			ind_block[j] = zone;
			if (!NEXT_BAD)
				goto end_bad;
		}
	}
	bb_simple_error_msg_and_die("too many bad blocks");
 end_bad:
	if (ind)
		write_block(ind, (char *) ind_block);
	if (dind)
		write_block(dind, (char *) dind_block);
#undef ind_block
#undef dind_block
}

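/*
 * v2 counterpart of make_bad_inode(): same structure, but zone pointers are
 * 32-bit, so each index block holds 256 entries instead of 512.
 */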
#if ENABLE_FEATURE_MINIX2
static void make_bad_inode2(void)
{
	struct minix2_inode *inode = &INODE_BUF2[MINIX_BAD_INO];
	int i, j, zone;
	int ind = 0, dind = 0;

#define ind_block (G.ind_block2)
#define dind_block (G.dind_block2)

	if (!G.badblocks)
		return;
	mark_inode(MINIX_BAD_INO);
	inode->i_nlinks = 1;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CUR_TIME;
	inode->i_mode = S_IFREG + 0000;
	inode->i_size = G.badblocks * BLOCK_SIZE;
	zone = next(0);
	for (i = 0; i < 7; i++) {
		inode->i_zone[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[7] = ind = get_free_block();
	memset(ind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 256; i++) {
		ind_block[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[8] = dind = get_free_block();
	memset(dind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 256; i++) {
		write_block(ind, (char *) ind_block);
		dind_block[i] = ind = get_free_block();
		memset(ind_block, 0, BLOCK_SIZE);
		for (j = 0; j < 256; j++) {
			ind_block[j] = zone;
			if (!NEXT_BAD)
				goto end_bad;
		}
	}
	bb_simple_error_msg_and_die("too many bad blocks");
 end_bad:
	if (ind)
		write_block(ind, (char *) ind_block);
	if (dind)
		write_block(dind, (char *) dind_block);
#undef ind_block
#undef dind_block
}
#else
void make_bad_inode2(void);
#endif

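/*
 * Create the root directory (inode 1): grab a data zone, write the directory
 * block prepared in main(), and size it at two entries ("." and "..") or
 * three when a ".badblocks" entry exists.  With no bad blocks the third
 * entry's inode field is zeroed so it reads as unused.
 */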
static void make_root_inode(void)
{
	struct minix1_inode *inode = &INODE_BUF1[MINIX_ROOT_INO];

	mark_inode(MINIX_ROOT_INO);
	inode->i_zone[0] = get_free_block();
	inode->i_nlinks = 2;
	inode->i_time = CUR_TIME;
	if (G.badblocks)
		inode->i_size = 3 * G.dirsize;
	else {
		G.root_block[2 * G.dirsize] = '\0';
		G.root_block[2 * G.dirsize + 1] = '\0';
		inode->i_size = 2 * G.dirsize;
	}
	inode->i_mode = S_IFDIR + 0755;
	inode->i_uid = GETUID;
	if (inode->i_uid)
		inode->i_gid = GETGID;
	write_block(inode->i_zone[0], G.root_block);
}

#if ENABLE_FEATURE_MINIX2
static void make_root_inode2(void)
{
	struct minix2_inode *inode = &INODE_BUF2[MINIX_ROOT_INO];

	mark_inode(MINIX_ROOT_INO);
	inode->i_zone[0] = get_free_block();
	inode->i_nlinks = 2;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CUR_TIME;
	if (G.badblocks)
		inode->i_size = 3 * G.dirsize;
	else {
		G.root_block[2 * G.dirsize] = '\0';
		G.root_block[2 * G.dirsize + 1] = '\0';
		inode->i_size = 2 * G.dirsize;
	}
	inode->i_mode = S_IFDIR + 0755;
	inode->i_uid = GETUID;
	if (inode->i_uid)
		inode->i_gid = GETGID;
	write_block(inode->i_zone[0], G.root_block);
}
#else
void make_root_inode2(void);
#endif

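/*
 * Read up to 'try' blocks starting at current_block and return how many full
 * blocks actually came back; a failed read counts as zero and a trailing
 * partial block only triggers a warning.
 */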
static size_t do_check(char *buffer, size_t try, unsigned current_block)
{
	ssize_t got;

	msg_eol = "seek failed during testing of blocks";
	xlseek(dev_fd, current_block * BLOCK_SIZE, SEEK_SET);
	msg_eol = "\n";

	got = read(dev_fd, buffer, try * BLOCK_SIZE);
	if (got < 0)
		got = 0;
	try = ((size_t)got) / BLOCK_SIZE;

	if (got & (BLOCK_SIZE - 1))
		fprintf(stderr, "Short read at block %u\n", (unsigned)(current_block + try));
	return try;
}

static void alarm_intr(int alnum UNUSED_PARAM)
{
	if (G.currently_testing >= SB_ZONES)
		return;
	signal(SIGALRM, alarm_intr);
	alarm(5);
	if (!G.currently_testing)
		return;
	printf("%u ...", G.currently_testing);
	fflush_all();
}

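/*
 * Read-test the whole device (-c) in TEST_BUFFER_BLOCKS chunks.  When a
 * chunk comes back short, the first unreadable block is marked bad in the
 * zone map and skipped; bad blocks inside the metadata area are fatal.  A
 * SIGALRM handler prints progress every five seconds.
 */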
static void check_blocks(void)
{
	size_t try, got;

	G.currently_testing = 0;
	signal(SIGALRM, alarm_intr);
	alarm(5);
	while (G.currently_testing < SB_ZONES) {
		msg_eol = "seek failed in check_blocks";
		xlseek(dev_fd, G.currently_testing * BLOCK_SIZE, SEEK_SET);
		msg_eol = "\n";
		try = TEST_BUFFER_BLOCKS;
		if (G.currently_testing + try > SB_ZONES)
			try = SB_ZONES - G.currently_testing;
		got = do_check(G.check_blocks_buffer, try, G.currently_testing);
		G.currently_testing += got;
		if (got == try)
			continue;
		if (G.currently_testing < SB_FIRSTZONE)
			bb_simple_error_msg_and_die("bad blocks before data-area: cannot make fs");
		mark_zone(G.currently_testing);
		G.badblocks++;
		G.currently_testing++;
	}
	alarm(0);
	printf("%d bad block(s)\n", G.badblocks);
}

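/*
 * Alternative to -c: read bad block numbers, one per line, from the file
 * given with -l and mark them in the zone map.
 */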
static void get_list_blocks(char *filename)
{
	FILE *listfile;
	unsigned long blockno;

	listfile = xfopen_for_read(filename);
	while (fscanf(listfile, "%lu\n", &blockno) == 1) {
		mark_zone(blockno);
		G.badblocks++;
	}
	printf("%d bad block(s)\n", G.badblocks);
}

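/*
 * Fill in the superblock and allocate the in-memory maps and inode table.
 * The inode count defaults to one inode per three blocks unless -i was
 * given, is rounded up to a whole inode block, and is capped at 65535 (the
 * width of s_ninodes).
 */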
static void setup_tables(void)
{
	unsigned long inodes;
	unsigned norm_firstzone;
	unsigned sb_zmaps;
	unsigned i;

	SB_MAGIC = G.magic;
	SB_ZONE_SIZE = 0;
	SB_MAXSIZE = version2 ? 0x7fffffff : (7 + 512 + 512 * 512) * 1024;
	if (version2)
		SB.s_zones = G.total_blocks;
	else
		SB.s_nzones = G.total_blocks;

	if (G.req_nr_inodes == 0)
		inodes = G.total_blocks / 3;
	else
		inodes = G.req_nr_inodes;

	if (version2)
		inodes = (inodes + MINIX2_INODES_PER_BLOCK - 1) &
			~(MINIX2_INODES_PER_BLOCK - 1);
	else
		inodes = (inodes + MINIX1_INODES_PER_BLOCK - 1) &
			~(MINIX1_INODES_PER_BLOCK - 1);
	if (inodes > 65535)
		inodes = 65535;
	SB_INODES = inodes;
	SB_IMAPS = div_roundup(SB_INODES + 1, BITS_PER_BLOCK);

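	/*
	 * SB_ZMAPS and the first data zone depend on each other: NORM_FIRSTZONE
	 * counts the zone-map blocks, while the zone map only has to cover the
	 * blocks from the first data zone onward.  Iterate until the estimate
	 * stops changing; give up after 999 rounds.
	 */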
	i = 999;
	SB_ZMAPS = 0;
	do {
		norm_firstzone = NORM_FIRSTZONE;
		sb_zmaps = div_roundup(G.total_blocks - norm_firstzone + 1, BITS_PER_BLOCK);
		if (SB_ZMAPS == sb_zmaps) goto got_it;
		SB_ZMAPS = sb_zmaps;
	} while (--i);
	bb_simple_error_msg_and_die("incompatible size/inode count, try different -i N");
 got_it:

	SB_FIRSTZONE = norm_firstzone;
	G.inode_map = xmalloc(SB_IMAPS * BLOCK_SIZE);
	G.zone_map = xmalloc(SB_ZMAPS * BLOCK_SIZE);
	memset(G.inode_map, 0xff, SB_IMAPS * BLOCK_SIZE);
	memset(G.zone_map, 0xff, SB_ZMAPS * BLOCK_SIZE);
	for (i = SB_FIRSTZONE; i < SB_ZONES; i++)
		unmark_zone(i);
	for (i = MINIX_ROOT_INO; i <= SB_INODES; i++)
		unmark_inode(i);
	G.inode_buffer = xzalloc(INODE_BUFFER_SIZE);
	printf("%lu inodes\n", (unsigned long)SB_INODES);
	printf("%lu blocks\n", (unsigned long)SB_ZONES);
	printf("Firstdatazone=%lu (%lu)\n", (unsigned long)SB_FIRSTZONE, (unsigned long)norm_firstzone);
	printf("Zonesize=%u\n", BLOCK_SIZE << SB_ZONE_SIZE);
	printf("Maxsize=%lu\n", (unsigned long)SB_MAXSIZE);
}

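/*
 * Illustrative invocations (device paths here are placeholders, not taken
 * from this source):
 *   mkfs.minix /dev/loop0              minix v1, 30-char names
 *   mkfs.minix -v -n 14 -c /dev/loop0  minix v2, 14-char names, read test
 * An optional second argument limits the filesystem size in 1 KiB blocks.
 */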
int mkfs_minix_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int mkfs_minix_main(int argc UNUSED_PARAM, char **argv)
{
	unsigned opt;
	char *tmp;
	char *str_i;
	char *listfile = NULL;

	INIT_G();

	G.namelen = 30;
	G.dirsize = 32;
	G.magic = MINIX1_SUPER_MAGIC2;

	if (INODE_SIZE1 * MINIX1_INODES_PER_BLOCK != BLOCK_SIZE)
		bb_simple_error_msg_and_die("bad inode size");
#if ENABLE_FEATURE_MINIX2
	if (INODE_SIZE2 * MINIX2_INODES_PER_BLOCK != BLOCK_SIZE)
		bb_simple_error_msg_and_die("bad inode size");
#endif

	opt = getopt32(argv, "ci:l:n:+v", &str_i, &listfile, &G.namelen);
	argv += optind;

	if (opt & 2) G.req_nr_inodes = xatoul(str_i);

	if (opt & 8) {
		if (G.namelen == 14) G.magic = MINIX1_SUPER_MAGIC;
		else if (G.namelen == 30) G.magic = MINIX1_SUPER_MAGIC2;
		else bb_show_usage();
		G.dirsize = G.namelen + 2;
	}
	if (opt & 0x10) {
#if ENABLE_FEATURE_MINIX2
		version2 = 1;
#else
		bb_simple_error_msg_and_die("not compiled with minix v2 support");
#endif
	}

	G.device_name = argv[0];
	if (!G.device_name)
		bb_show_usage();

	if (find_mount_point(G.device_name, 0))
		bb_simple_error_msg_and_die("can't format mounted filesystem");

	xmove_fd(xopen(G.device_name, O_RDWR), dev_fd);

	G.total_blocks = get_volume_size_in_bytes(dev_fd, argv[1], 1024, 1) / 1024;

	if (G.total_blocks < 10)
		bb_simple_error_msg_and_die("must have at least 10 blocks");

	if (version2) {
		G.magic = MINIX2_SUPER_MAGIC2;
		if (G.namelen == 14)
			G.magic = MINIX2_SUPER_MAGIC;
	} else if (G.total_blocks > 65535)
		G.total_blocks = 65535;
#if 0
	struct stat statbuf;
	xfstat(dev_fd, &statbuf, G.device_name);

	if (!S_ISBLK(statbuf.st_mode))
		opt &= ~1;
#if 0
	else if (statbuf.st_rdev == 0x0300 || statbuf.st_rdev == 0x0340)
		bb_error_msg_and_die("will not try "
			"to make filesystem on '%s'", G.device_name);
#endif
#endif
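
	/*
	 * Hand-build the root directory block: each entry is a 2-byte inode
	 * number followed by the name.  "." and ".." both point at inode 1
	 * (the root), ".badblocks" at inode 2; the third entry is only counted
	 * in i_size when bad blocks were actually found.
	 */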
	tmp = G.root_block;
	*(short *) tmp = 1;
	strcpy(tmp + 2, ".");
	tmp += G.dirsize;
	*(short *) tmp = 1;
	strcpy(tmp + 2, "..");
	tmp += G.dirsize;
	*(short *) tmp = 2;
	strcpy(tmp + 2, ".badblocks");

	setup_tables();

	if (opt & 1)
		check_blocks();
	else if (listfile)
		get_list_blocks(listfile);

	if (version2) {
		make_root_inode2();
		make_bad_inode2();
	} else {
		make_root_inode();
		make_bad_inode();
	}

	mark_good_blocks();
	write_tables();
	return 0;
}