/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  minix directory handling functions
 *
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/highmem.h>
#include <linux/swap.h>

typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

static int minix_readdir(struct file *, void *, filldir_t);

const struct file_operations minix_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= minix_readdir,
	.fsync		= simple_fsync,
};

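/*
 * Drop a page returned by dir_get_page(): undo the kmap() and release
 * the page cache reference.
 */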
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = PAGE_CACHE_SIZE;

	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
	return last_byte;
}

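/*
 * Number of whole or partial pages covered by the directory contents.
 */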
static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

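/*
 * Finish a write to a directory chunk: update i_size if the directory
 * grew, then either write the page out synchronously (DIRSYNC) or just
 * unlock it and leave it dirty.
 */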
static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}

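/*
 * Read directory page n through the page cache and kmap() it;
 * the caller releases it with dir_put_page().
 */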
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageUptodate(page))
			goto fail;
	}
	return page;

fail:
	dir_put_page(page);
	return ERR_PTR(-EIO);
}

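/* Step to the next on-disk entry; entries are sbi->s_dirsize bytes apart. */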
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
	return (void*)((char*)de + sbi->s_dirsize);
}

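/*
 * readdir: walk the directory page by page and pass every in-use
 * entry (non-zero inode number) to filldir.
 */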
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned long pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	char *name;
	__u32 inumber;

	pos = (pos + chunk_size-1) & ~(chunk_size-1);
	if (pos >= inode->i_size)
		goto done;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page = dir_get_page(inode, n);

		if (IS_ERR(page))
			continue;
		kaddr = (char *)page_address(page);
		p = kaddr+offset;
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				int over;

				unsigned l = strnlen(name, sbi->s_namelen);
				offset = p - kaddr;
				over = filldir(dirent, name, l,
					(n << PAGE_CACHE_SHIFT) | offset,
					inumber, DT_UNKNOWN);
				if (over) {
					dir_put_page(page);
					goto done;
				}
			}
		}
		dir_put_page(page);
	}

done:
	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
	return 0;
}

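/*
 * Compare a name of length len against a fixed-width on-disk name of at
 * most maxlen bytes (shorter on-disk names are NUL-padded).
 */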
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}

/*
 *	minix_find_entry()
 *
 * Finds an entry in the specified directory with the wanted name.
 * It returns the page in which the entry was found (in *res_page,
 * still kmap()ed) and the entry itself.  It does NOT read the inode
 * of the entry - you'll have to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = dentry->d_parent->d_inode;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	char *p;

	char *namx;
	__u32 inumber;
	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		page = dir_get_page(dir, n);
		if (IS_ERR(page))
			continue;

		kaddr = (char*)page_address(page);
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		dir_put_page(page);
	}
	return NULL;

found:
	*res_page = page;
	return (minix_dirent *)p;
}

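/*
 * Add an entry for dentry pointing at inode: reuse the first free slot,
 * or append a new chunk at the end of the directory.
 */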
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	minix_dirent *de;
	minix3_dirent *de3;
	loff_t pos;
	int err;
	char *namx = NULL;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *limit, *dir_end;

		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = (char*)page_address(page);
		dir_end = kaddr + minix_last_byte(dir, n);
		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		unlock_page(page);
		dir_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) + p - (char *)page_address(page);
	err = __minix_write_begin(NULL, page->mapping, pos, sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err)
		goto out_unlock;
	memcpy (namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_put:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

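/*
 * Remove a directory entry by clearing its inode number and writing the
 * chunk back.
 */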
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = (struct inode*)mapping->host;
	char *kaddr = page_address(page);
	loff_t pos = page_offset(page) + (char*)de - kaddr;
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	unsigned len = sbi->s_dirsize;
	int err;

	lock_page(page);
	err = __minix_write_begin(NULL, mapping, pos, len,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err == 0) {
		if (sbi->s_version == MINIX_V3)
			((minix3_dirent *) de)->inode = 0;
		else
			de->inode = 0;
		err = dir_commit_chunk(page, pos, len);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return err;
}

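/*
 * Set up the "." and ".." entries in a freshly created directory.
 */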
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = __minix_write_begin(NULL, mapping, 0, 2 * sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_atomic(kaddr, KM_USER0);

	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *kaddr, *limit;

		page = dir_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = (char *)page_address(page);
		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* escape from . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		dir_put_page(page);
	}
	return 1;

not_empty:
	dir_put_page(page);
	return 0;
}

/* Releases the page */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	loff_t pos = page_offset(page) +
			(char *)de-(char*)page_address(page);
	int err;

	lock_page(page);

	err = __minix_write_begin(NULL, mapping, pos, sbi->s_dirsize,
					AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (err == 0) {
		if (sbi->s_version == MINIX_V3)
			((minix3_dirent *) de)->inode = inode->i_ino;
		else
			de->inode = inode->i_ino;
		err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}

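/*
 * Return the ".." entry of dir (the second slot on page 0) together
 * with the page it lives on.
 */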
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = dir_get_page(dir, 0);
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	struct minix_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = minix_next_entry(page_address(page), sbi);
		*p = page;
	}
	return de;
}

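/*
 * Look up dentry in its parent directory and return the inode number,
 * or 0 if the entry does not exist.
 */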
ino_t minix_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
	ino_t res = 0;

	if (de) {
		struct address_space *mapping = page->mapping;
		struct inode *inode = mapping->host;
		struct minix_sb_info *sbi = minix_sb(inode->i_sb);

		if (sbi->s_version == MINIX_V3)
			res = ((minix3_dirent *) de)->inode;
		else
			res = de->inode;
		dir_put_page(page);
	}
	return res;
}