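/*
 * mkfs.minix: create a Minix version 1 filesystem (or version 2 when
 * built with ENABLE_FEATURE_MINIX2) on a block device or image file.
 */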
#include "libbb.h"
#include <mntent.h>

#include "minix.h"

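/* Inode times, uid and gid are pinned to 0 so that a freshly made image is
 * bit-for-bit reproducible; flip the #if to record the real values. */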
#if 1
# define CUR_TIME 0
# define GETUID 0
# define GETGID 0
#else
# define CUR_TIME time(NULL)
# define GETUID getuid()
# define GETGID getgid()
#endif

enum {
	MAX_GOOD_BLOCKS = 512,
	TEST_BUFFER_BLOCKS = 16,
};

#if !ENABLE_FEATURE_MINIX2
enum { version2 = 0 };
#endif

enum { dev_fd = 3 };

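/* All mutable state sits in one heap-allocated struct reached through the
 * libbb ptr_to_globals pointer; INIT_G() allocates and zeroes it. The
 * target device is kept on the fixed descriptor dev_fd (3). */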
struct globals {
#if ENABLE_FEATURE_MINIX2
	smallint version2;
#define version2 G.version2
#endif
	char *device_name;
	uint32_t total_blocks;
	int badblocks;
	int namelen;
	int dirsize;
	int magic;
	char *inode_buffer;
	char *inode_map;
	char *zone_map;
	int used_good_blocks;
	unsigned long req_nr_inodes;
	unsigned currently_testing;

	char root_block[BLOCK_SIZE];
	char superblock_buffer[BLOCK_SIZE];
	char boot_block_buffer[512];
	unsigned short good_blocks_table[MAX_GOOD_BLOCKS];

	char check_blocks_buffer[BLOCK_SIZE * TEST_BUFFER_BLOCKS];

	unsigned short ind_block1[BLOCK_SIZE >> 1];
	unsigned short dind_block1[BLOCK_SIZE >> 1];
	unsigned long ind_block2[BLOCK_SIZE >> 2];
	unsigned long dind_block2[BLOCK_SIZE >> 2];
};
#define G (*ptr_to_globals)
#define INIT_G() do { \
	SET_PTR_TO_GLOBALS(xzalloc(sizeof(G))); \
} while (0)

static ALWAYS_INLINE unsigned div_roundup(unsigned size, unsigned n)
{
	return (size + n-1) / n;
}

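/* On-disk inode numbers start at 1, so the typed views of inode_buffer are
 * biased by -1: INODE_BUF1[MINIX_ROOT_INO] is the root inode itself. */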
#define INODE_BUF1 (((struct minix1_inode*)G.inode_buffer) - 1)
#define INODE_BUF2 (((struct minix2_inode*)G.inode_buffer) - 1)

#define SB (*(struct minix_superblock*)G.superblock_buffer)

#define SB_INODES    (SB.s_ninodes)
#define SB_IMAPS     (SB.s_imap_blocks)
#define SB_ZMAPS     (SB.s_zmap_blocks)
#define SB_FIRSTZONE (SB.s_firstdatazone)
#define SB_ZONE_SIZE (SB.s_log_zone_size)
#define SB_MAXSIZE   (SB.s_max_size)
#define SB_MAGIC     (SB.s_magic)

#if !ENABLE_FEATURE_MINIX2
# define SB_ZONES (SB.s_nzones)
# define INODE_BLOCKS div_roundup(SB_INODES, MINIX1_INODES_PER_BLOCK)
#else
# define SB_ZONES (version2 ? SB.s_zones : SB.s_nzones)
# define INODE_BLOCKS div_roundup(SB_INODES, \
		(version2 ? MINIX2_INODES_PER_BLOCK : MINIX1_INODES_PER_BLOCK))
#endif

#define INODE_BUFFER_SIZE (INODE_BLOCKS * BLOCK_SIZE)
#define NORM_FIRSTZONE    (2 + SB_IMAPS + SB_ZMAPS + INODE_BLOCKS)

static int minix_bit(const char* a, unsigned i)
{
	return a[i >> 3] & (1 << (i & 7));
}

static void minix_setbit(char *a, unsigned i)
{
	setbit(a, i);
}
static void minix_clrbit(char *a, unsigned i)
{
	clrbit(a, i);
}

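/* Zone numbering starts at SB_FIRSTZONE, so a zone's bit lives at index
 * (zone - SB_FIRSTZONE + 1); bit 0 of both maps is never cleared. */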
#define zone_in_use(x)  minix_bit(G.zone_map, (x) - SB_FIRSTZONE + 1)

#define mark_inode(x)   minix_setbit(G.inode_map, (x))
#define unmark_inode(x) minix_clrbit(G.inode_map, (x))
#define mark_zone(x)    minix_setbit(G.zone_map, (x) - SB_FIRSTZONE + 1)
#define unmark_zone(x)  minix_clrbit(G.zone_map, (x) - SB_FIRSTZONE + 1)

#ifndef BLKGETSIZE
# define BLKGETSIZE _IO(0x12,96)
#endif

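/* Write the metadata out in on-disk order: a zeroed 512-byte boot sector
 * at offset 0, the superblock at offset BLOCK_SIZE, then the inode map,
 * the zone map and the inode table. Data zones follow at SB_FIRSTZONE. */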
static void write_tables(void)
{
	SB.s_state |= MINIX_VALID_FS;
	SB.s_state &= ~MINIX_ERROR_FS;

	msg_eol = "seek to 0 failed";
	xlseek(dev_fd, 0, SEEK_SET);

	msg_eol = "can't clear boot sector";
	xwrite(dev_fd, G.boot_block_buffer, 512);

	msg_eol = "seek to BLOCK_SIZE failed";
	xlseek(dev_fd, BLOCK_SIZE, SEEK_SET);

	msg_eol = "can't write superblock";
	xwrite(dev_fd, G.superblock_buffer, BLOCK_SIZE);

	msg_eol = "can't write inode map";
	xwrite(dev_fd, G.inode_map, SB_IMAPS * BLOCK_SIZE);

	msg_eol = "can't write zone map";
	xwrite(dev_fd, G.zone_map, SB_ZMAPS * BLOCK_SIZE);

	msg_eol = "can't write inodes";
	xwrite(dev_fd, G.inode_buffer, INODE_BUFFER_SIZE);

	msg_eol = "\n";
}

static void write_block(int blk, char *buffer)
{
	xlseek(dev_fd, blk * BLOCK_SIZE, SEEK_SET);
	xwrite(dev_fd, buffer, BLOCK_SIZE);
}

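/* Hand out the next zone that is not already marked in use, remembering it
 * in good_blocks_table so mark_good_blocks() can reserve it later. These
 * zones hold the root directory data block and the indirect blocks of the
 * bad-block inode. */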
static int get_free_block(void)
{
	int blk;

	if (G.used_good_blocks + 1 >= MAX_GOOD_BLOCKS)
		bb_error_msg_and_die("too many bad blocks");
	if (G.used_good_blocks)
		blk = G.good_blocks_table[G.used_good_blocks - 1] + 1;
	else
		blk = SB_FIRSTZONE;
	while (blk < SB_ZONES && zone_in_use(blk))
		blk++;
	if (blk >= SB_ZONES)
		bb_error_msg_and_die("not enough good blocks");
	G.good_blocks_table[G.used_good_blocks] = blk;
	G.used_good_blocks++;
	return blk;
}

static void mark_good_blocks(void)
{
	int blk;

	for (blk = 0; blk < G.used_good_blocks; blk++)
		mark_zone(G.good_blocks_table[blk]);
}

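/* Return the next zone after 'zone' that is marked in use - at this stage
 * only bad blocks are marked - or 0 when none are left. next(0) starts the
 * scan at the first data zone. */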
static int next(int zone)
{
	if (!zone)
		zone = SB_FIRSTZONE - 1;
	while (++zone < SB_ZONES)
		if (zone_in_use(zone))
			return zone;
	return 0;
}

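/* Chain every bad block into the reserved bad-block inode, a mode-0000
 * regular file. A V1 inode covers 7 direct zones, one indirect block of
 * 512 zone pointers and one double-indirect block, so at most
 * 7 + 512 + 512*512 bad blocks can be recorded before we give up. */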
static void make_bad_inode(void)
{
	struct minix1_inode *inode = &INODE_BUF1[MINIX_BAD_INO];
	int i, j, zone;
	int ind = 0, dind = 0;

#define ind_block (G.ind_block1)
#define dind_block (G.dind_block1)

#define NEXT_BAD (zone = next(zone))

	if (!G.badblocks)
		return;
	mark_inode(MINIX_BAD_INO);
	inode->i_nlinks = 1;
	inode->i_time = CUR_TIME;
	inode->i_mode = S_IFREG + 0000;
	inode->i_size = G.badblocks * BLOCK_SIZE;
	zone = next(0);
	for (i = 0; i < 7; i++) {
		inode->i_zone[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[7] = ind = get_free_block();
	memset(ind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 512; i++) {
		ind_block[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[8] = dind = get_free_block();
	memset(dind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 512; i++) {
		write_block(ind, (char *) ind_block);
		dind_block[i] = ind = get_free_block();
		memset(ind_block, 0, BLOCK_SIZE);
		for (j = 0; j < 512; j++) {
			ind_block[j] = zone;
			if (!NEXT_BAD)
				goto end_bad;
		}
	}
	bb_error_msg_and_die("too many bad blocks");
 end_bad:
	if (ind)
		write_block(ind, (char *) ind_block);
	if (dind)
		write_block(dind, (char *) dind_block);
#undef ind_block
#undef dind_block
}

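/* V2 counterpart of make_bad_inode(): zone pointers are 32-bit, so each
 * indirect block holds 256 entries instead of 512. */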
#if ENABLE_FEATURE_MINIX2
static void make_bad_inode2(void)
{
	struct minix2_inode *inode = &INODE_BUF2[MINIX_BAD_INO];
	int i, j, zone;
	int ind = 0, dind = 0;

#define ind_block (G.ind_block2)
#define dind_block (G.dind_block2)

	if (!G.badblocks)
		return;
	mark_inode(MINIX_BAD_INO);
	inode->i_nlinks = 1;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CUR_TIME;
	inode->i_mode = S_IFREG + 0000;
	inode->i_size = G.badblocks * BLOCK_SIZE;
	zone = next(0);
	for (i = 0; i < 7; i++) {
		inode->i_zone[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[7] = ind = get_free_block();
	memset(ind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 256; i++) {
		ind_block[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[8] = dind = get_free_block();
	memset(dind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 256; i++) {
		write_block(ind, (char *) ind_block);
		dind_block[i] = ind = get_free_block();
		memset(ind_block, 0, BLOCK_SIZE);
		for (j = 0; j < 256; j++) {
			ind_block[j] = zone;
			if (!NEXT_BAD)
				goto end_bad;
		}
	}
	bb_error_msg_and_die("too many bad blocks");
 end_bad:
	if (ind)
		write_block(ind, (char *) ind_block);
	if (dind)
		write_block(dind, (char *) dind_block);
#undef ind_block
#undef dind_block
}
#else
void make_bad_inode2(void);
#endif

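/* Build the root directory inode. Its single data zone (written from
 * root_block) holds "." and "..", plus ".badblocks" when bad blocks were
 * found, which is why i_size becomes 3 * dirsize in that case. */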
static void make_root_inode(void)
{
	struct minix1_inode *inode = &INODE_BUF1[MINIX_ROOT_INO];

	mark_inode(MINIX_ROOT_INO);
	inode->i_zone[0] = get_free_block();
	inode->i_nlinks = 2;
	inode->i_time = CUR_TIME;
	if (G.badblocks)
		inode->i_size = 3 * G.dirsize;
	else {
		G.root_block[2 * G.dirsize] = '\0';
		G.root_block[2 * G.dirsize + 1] = '\0';
		inode->i_size = 2 * G.dirsize;
	}
	inode->i_mode = S_IFDIR + 0755;
	inode->i_uid = GETUID;
	if (inode->i_uid)
		inode->i_gid = GETGID;
	write_block(inode->i_zone[0], G.root_block);
}

#if ENABLE_FEATURE_MINIX2
static void make_root_inode2(void)
{
	struct minix2_inode *inode = &INODE_BUF2[MINIX_ROOT_INO];

	mark_inode(MINIX_ROOT_INO);
	inode->i_zone[0] = get_free_block();
	inode->i_nlinks = 2;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CUR_TIME;
	if (G.badblocks)
		inode->i_size = 3 * G.dirsize;
	else {
		G.root_block[2 * G.dirsize] = '\0';
		G.root_block[2 * G.dirsize + 1] = '\0';
		inode->i_size = 2 * G.dirsize;
	}
	inode->i_mode = S_IFDIR + 0755;
	inode->i_uid = GETUID;
	if (inode->i_uid)
		inode->i_gid = GETGID;
	write_block(inode->i_zone[0], G.root_block);
}
#else
void make_root_inode2(void);
#endif

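/* Read up to 'try' blocks starting at current_block and return how many
 * full blocks were actually readable; the caller treats the first
 * unreadable block as bad. */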
static size_t do_check(char *buffer, size_t try, unsigned current_block)
{
	ssize_t got;

	msg_eol = "seek failed during testing of blocks";
	xlseek(dev_fd, current_block * BLOCK_SIZE, SEEK_SET);
	msg_eol = "\n";

	got = read(dev_fd, buffer, try * BLOCK_SIZE);
	if (got < 0)
		got = 0;
	try = ((size_t)got) / BLOCK_SIZE;

	if (got & (BLOCK_SIZE - 1))
		fprintf(stderr, "Short read at block %u\n", (unsigned)(current_block + try));
	return try;
}

static void alarm_intr(int alnum UNUSED_PARAM)
{
	if (G.currently_testing >= SB_ZONES)
		return;
	signal(SIGALRM, alarm_intr);
	alarm(5);
	if (!G.currently_testing)
		return;
	printf("%u ...", G.currently_testing);
	fflush_all();
}

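/* Scan the whole device with -c: read it in TEST_BUFFER_BLOCKS chunks,
 * mark every unreadable block as bad, and report progress every 5 seconds
 * via the SIGALRM handler above. */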
static void check_blocks(void)
{
	size_t try, got;

	G.currently_testing = 0;
	signal(SIGALRM, alarm_intr);
	alarm(5);
	while (G.currently_testing < SB_ZONES) {
		msg_eol = "seek failed in check_blocks";
		xlseek(dev_fd, G.currently_testing * BLOCK_SIZE, SEEK_SET);
		msg_eol = "\n";
		try = TEST_BUFFER_BLOCKS;
		if (G.currently_testing + try > SB_ZONES)
			try = SB_ZONES - G.currently_testing;
		got = do_check(G.check_blocks_buffer, try, G.currently_testing);
		G.currently_testing += got;
		if (got == try)
			continue;
		if (G.currently_testing < SB_FIRSTZONE)
			bb_error_msg_and_die("bad blocks before data-area: cannot make fs");
		mark_zone(G.currently_testing);
		G.badblocks++;
		G.currently_testing++;
	}
	alarm(0);
	printf("%d bad block(s)\n", G.badblocks);
}

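/* -l FILE: read one bad block number per line and mark it in the zone map. */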
static void get_list_blocks(char *filename)
{
	FILE *listfile;
	unsigned long blockno;

	listfile = xfopen_for_read(filename);
	/* stop at EOF or at the first line that is not a block number */
	while (fscanf(listfile, "%lu\n", &blockno) == 1) {
		mark_zone(blockno);
		G.badblocks++;
	}
	printf("%d bad block(s)\n", G.badblocks);
}

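/* Fill in the superblock and size the inode and zone maps. Unless -i was
 * given, the inode count defaults to one inode per three blocks, rounded
 * up to fill a whole inode block and capped at 65535. */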
static void setup_tables(void)
{
	unsigned long inodes;
	unsigned norm_firstzone;
	unsigned sb_zmaps;
	unsigned i;

	SB_MAGIC = G.magic;
	SB_ZONE_SIZE = 0;
	SB_MAXSIZE = version2 ? 0x7fffffff : (7 + 512 + 512 * 512) * 1024;
	if (version2)
		SB.s_zones = G.total_blocks;
	else
		SB.s_nzones = G.total_blocks;

	if (G.req_nr_inodes == 0)
		inodes = G.total_blocks / 3;
	else
		inodes = G.req_nr_inodes;

	if (version2)
		inodes = (inodes + MINIX2_INODES_PER_BLOCK - 1) &
				~(MINIX2_INODES_PER_BLOCK - 1);
	else
		inodes = (inodes + MINIX1_INODES_PER_BLOCK - 1) &
				~(MINIX1_INODES_PER_BLOCK - 1);
	if (inodes > 65535)
		inodes = 65535;
	SB_INODES = inodes;
	SB_IMAPS = div_roundup(SB_INODES + 1, BITS_PER_BLOCK);

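	/* SB_ZMAPS and the first data zone depend on each other: NORM_FIRSTZONE
	 * counts the zone-map blocks, while the zone map must cover every zone
	 * from NORM_FIRSTZONE to the end of the device. Iterate until the value
	 * stops changing (bail out after 999 rounds). */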
	i = 999;
	SB_ZMAPS = 0;
	do {
		norm_firstzone = NORM_FIRSTZONE;
		sb_zmaps = div_roundup(G.total_blocks - norm_firstzone + 1, BITS_PER_BLOCK);
		if (SB_ZMAPS == sb_zmaps) goto got_it;
		SB_ZMAPS = sb_zmaps;
	} while (--i);
	bb_error_msg_and_die("incompatible size/inode count, try different -i N");
 got_it:

	SB_FIRSTZONE = norm_firstzone;
	G.inode_map = xmalloc(SB_IMAPS * BLOCK_SIZE);
	G.zone_map = xmalloc(SB_ZMAPS * BLOCK_SIZE);
	memset(G.inode_map, 0xff, SB_IMAPS * BLOCK_SIZE);
	memset(G.zone_map, 0xff, SB_ZMAPS * BLOCK_SIZE);
	for (i = SB_FIRSTZONE; i < SB_ZONES; i++)
		unmark_zone(i);
	for (i = MINIX_ROOT_INO; i <= SB_INODES; i++)
		unmark_inode(i);
	G.inode_buffer = xzalloc(INODE_BUFFER_SIZE);
	printf("%lu inodes\n", (unsigned long)SB_INODES);
	printf("%lu blocks\n", (unsigned long)SB_ZONES);
	printf("Firstdatazone=%lu (%lu)\n", (unsigned long)SB_FIRSTZONE, (unsigned long)norm_firstzone);
	printf("Zonesize=%u\n", BLOCK_SIZE << SB_ZONE_SIZE);
	printf("Maxsize=%lu\n", (unsigned long)SB_MAXSIZE);
}

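/* Option flags returned by getopt32(), in option-string order:
 * 0x01 -c (scan for bad blocks), 0x02 -i N (number of inodes),
 * 0x04 -l FILE (list of bad blocks), 0x08 -n N (name length, 14 or 30),
 * 0x10 -v (make a Minix version 2 filesystem). */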
int mkfs_minix_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int mkfs_minix_main(int argc UNUSED_PARAM, char **argv)
{
	unsigned opt;
	char *tmp;
	char *str_i;
	char *listfile = NULL;

	INIT_G();

	G.namelen = 30;
	G.dirsize = 32;
	G.magic = MINIX1_SUPER_MAGIC2;

	if (INODE_SIZE1 * MINIX1_INODES_PER_BLOCK != BLOCK_SIZE)
		bb_error_msg_and_die("bad inode size");
#if ENABLE_FEATURE_MINIX2
	if (INODE_SIZE2 * MINIX2_INODES_PER_BLOCK != BLOCK_SIZE)
		bb_error_msg_and_die("bad inode size");
#endif

	opt = getopt32(argv, "ci:l:n:+v", &str_i, &listfile, &G.namelen);
	argv += optind;

	if (opt & 2) G.req_nr_inodes = xatoul(str_i);

	if (opt & 8) {
		if (G.namelen == 14) G.magic = MINIX1_SUPER_MAGIC;
		else if (G.namelen == 30) G.magic = MINIX1_SUPER_MAGIC2;
		else bb_show_usage();
		G.dirsize = G.namelen + 2;
	}
	if (opt & 0x10) {
#if ENABLE_FEATURE_MINIX2
		version2 = 1;
#else
		bb_error_msg_and_die("not compiled with minix v2 support");
#endif
	}

	G.device_name = argv[0];
	if (!G.device_name)
		bb_show_usage();

	if (find_mount_point(G.device_name, 0))
		bb_error_msg_and_die("can't format mounted filesystem");

	xmove_fd(xopen(G.device_name, O_RDWR), dev_fd);

	G.total_blocks = get_volume_size_in_bytes(dev_fd, argv[1], 1024, 1) / 1024;

	if (G.total_blocks < 10)
		bb_error_msg_and_die("must have at least 10 blocks");

	if (version2) {
		G.magic = MINIX2_SUPER_MAGIC2;
		if (G.namelen == 14)
			G.magic = MINIX2_SUPER_MAGIC;
	} else if (G.total_blocks > 65535)
		G.total_blocks = 65535;
#if 0
	struct stat statbuf;
	xfstat(dev_fd, &statbuf, G.device_name);

	if (!S_ISBLK(statbuf.st_mode))
		opt &= ~1;
#if 0
	else if (statbuf.st_rdev == 0x0300 || statbuf.st_rdev == 0x0340)
		bb_error_msg_and_die("will not try "
			"to make filesystem on '%s'", G.device_name);
#endif
#endif
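
	/* Pre-build the root directory data block: each entry is a 2-byte inode
	 * number followed by the name, G.dirsize bytes per entry. "." and ".."
	 * point at the root inode (1), ".badblocks" at the bad-block inode (2). */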
	tmp = G.root_block;
	*(short *) tmp = 1;
	strcpy(tmp + 2, ".");
	tmp += G.dirsize;
	*(short *) tmp = 1;
	strcpy(tmp + 2, "..");
	tmp += G.dirsize;
	*(short *) tmp = 2;
	strcpy(tmp + 2, ".badblocks");

	setup_tables();

	if (opt & 1)
		check_blocks();
	else if (listfile)
		get_list_blocks(listfile);

	if (version2) {
		make_root_inode2();
		make_bad_inode2();
	} else {
		make_root_inode();
		make_bad_inode();
	}

	mark_good_blocks();
	write_tables();
	return 0;
}