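/*
 * inode.c -- inode lookup and extent mapping for the read-only SGI EFS
 * filesystem.
 */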
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/fs.h>
#include "efs.h"
#include <linux/efs_fs_sb.h>

static int efs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, efs_get_block);
}
static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, efs_get_block);
}
static const struct address_space_operations efs_aops = {
        .readpage = efs_readpage,
        .bmap = _efs_bmap
};

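/*
 * extent_copy() unpacks an on-disk extent descriptor into memory.
 * The eight raw[] bytes are stored big-endian on disk:
 *
 *      raw[0]          ex_magic
 *      raw[1..3]       ex_bn     (24-bit physical block number)
 *      raw[4]          ex_length
 *      raw[5..7]       ex_offset (24-bit logical block offset)
 *
 * so this is not just a copy: dst ends up in CPU byte order.
 */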
static inline void extent_copy(efs_extent *src, efs_extent *dst) {
        dst->cooked.ex_magic  =  (unsigned int) src->raw[0];
        dst->cooked.ex_bn     = ((unsigned int) src->raw[1] << 16) |
                                ((unsigned int) src->raw[2] <<  8) |
                                ((unsigned int) src->raw[3] <<  0);
        dst->cooked.ex_length =  (unsigned int) src->raw[4];
        dst->cooked.ex_offset = ((unsigned int) src->raw[5] << 16) |
                                ((unsigned int) src->raw[6] <<  8) |
                                ((unsigned int) src->raw[7] <<  0);
}

struct inode *efs_iget(struct super_block *super, unsigned long ino)
{
        int i, inode_index;
        dev_t device;
        u32 rdev;
        struct buffer_head *bh;
        struct efs_sb_info *sb = SUPER_INFO(super);
        struct efs_inode_info *in;
        efs_block_t block, offset;
        struct efs_dinode *efs_inode;
        struct inode *inode;

        inode = iget_locked(super, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        in = INODE_INFO(inode);
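
        /*
         * Work out where this inode lives on disk. EFS keeps the inodes
         * for each cylinder group together at the start of the group:
         *
         *      | cylinder group | cylinder group | cylinder group | ...
         *      | inodes | data  | inodes | data  | inodes | data  | ...
         *
         * First compute the logical inode block index (as if all inode
         * blocks were contiguous), then map that index into the right
         * cylinder group, and finally compute the byte offset of the
         * inode within its block.
         */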
        inode_index = inode->i_ino /
                (EFS_BLOCKSIZE / sizeof(struct efs_dinode));

        block = sb->fs_start + sb->first_block +
                (sb->group_size * (inode_index / sb->inode_blocks)) +
                (inode_index % sb->inode_blocks);

        offset = (inode->i_ino %
                        (EFS_BLOCKSIZE / sizeof(struct efs_dinode))) *
                sizeof(struct efs_dinode);

        bh = sb_bread(inode->i_sb, block);
        if (!bh) {
                pr_warn("%s() failed at block %d\n", __func__, block);
                goto read_inode_error;
        }

        efs_inode = (struct efs_dinode *) (bh->b_data + offset);

        inode->i_mode = be16_to_cpu(efs_inode->di_mode);
        set_nlink(inode, be16_to_cpu(efs_inode->di_nlink));
        i_uid_write(inode, (uid_t)be16_to_cpu(efs_inode->di_uid));
        i_gid_write(inode, (gid_t)be16_to_cpu(efs_inode->di_gid));
        inode->i_size = be32_to_cpu(efs_inode->di_size);
        inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime);
        inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
        inode->i_ctime.tv_sec = be32_to_cpu(efs_inode->di_ctime);
        inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;

        /* work out the number of blocks from the byte size */
        if (inode->i_size == 0) {
                inode->i_blocks = 0;
        } else {
                inode->i_blocks = ((inode->i_size - 1) >> EFS_BLOCKSIZE_BITS) + 1;
        }

        /* decode the device number for special files (old or new encoding) */
        rdev = be16_to_cpu(efs_inode->di_u.di_dev.odev);
        if (rdev == 0xffff) {
                rdev = be32_to_cpu(efs_inode->di_u.di_dev.ndev);
                if (sysv_major(rdev) > 0xfff)
                        device = 0;
                else
                        device = MKDEV(sysv_major(rdev), sysv_minor(rdev));
        } else
                device = old_decode_dev(rdev);

        /* number of extents used by this object */
        in->numextents = be16_to_cpu(efs_inode->di_numextents);
        in->lastextent = 0;

        /* copy the extents stored in the inode itself and sanity-check them */
        for (i = 0; i < EFS_DIRECTEXTENTS; i++) {
                extent_copy(&(efs_inode->di_u.di_extents[i]), &(in->extents[i]));
                if (i < in->numextents && in->extents[i].cooked.ex_magic != 0) {
                        pr_warn("extent %d has bad magic number in inode %lu\n",
                                i, inode->i_ino);
                        brelse(bh);
                        goto read_inode_error;
                }
        }

        brelse(bh);
        pr_debug("efs_iget(): inode %lu, extents %d, mode %o\n",
                 inode->i_ino, in->numextents, inode->i_mode);
        switch (inode->i_mode & S_IFMT) {
        case S_IFDIR:
                inode->i_op = &efs_dir_inode_operations;
                inode->i_fop = &efs_dir_operations;
                break;
        case S_IFREG:
                inode->i_fop = &generic_ro_fops;
                inode->i_data.a_ops = &efs_aops;
                break;
        case S_IFLNK:
                inode->i_op = &page_symlink_inode_operations;
                inode->i_data.a_ops = &efs_symlink_aops;
                break;
        case S_IFCHR:
        case S_IFBLK:
        case S_IFIFO:
                init_special_inode(inode, inode->i_mode, device);
                break;
        default:
                pr_warn("unsupported inode mode %o\n", inode->i_mode);
                goto read_inode_error;
        }

        unlock_new_inode(inode);
        return inode;

read_inode_error:
        pr_warn("failed to read inode %lu\n", inode->i_ino);
        iget_failed(inode);
        return ERR_PTR(-EIO);
}

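/*
 * efs_extent_check(): given a (cooked) extent and a logical block number
 * within the file, return the corresponding absolute disk block if the
 * extent covers that logical block, or 0 if it does not.
 */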
static inline efs_block_t
efs_extent_check(efs_extent *ptr, efs_block_t block, struct efs_sb_info *sb) {
        efs_block_t start;
        efs_block_t length;
        efs_block_t offset;

        start  = ptr->cooked.ex_bn;
        length = ptr->cooked.ex_length;
        offset = ptr->cooked.ex_offset;

        if ((block >= offset) && (block < offset + length)) {
                return sb->fs_start + start + block - offset;
        } else {
                return 0;
        }
}

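/*
 * efs_map_block(): map a logical block of a file to an absolute disk
 * block. Small files keep all their extents in the inode (direct
 * extents); larger files store extent descriptors in separate indirect
 * extent blocks, which are searched here starting from the extent that
 * matched last time (in->lastextent).
 */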
efs_block_t efs_map_block(struct inode *inode, efs_block_t block) {
        struct efs_sb_info *sb = SUPER_INFO(inode->i_sb);
        struct efs_inode_info *in = INODE_INFO(inode);
        struct buffer_head *bh = NULL;

        int cur, last, first = 1;
        int ibase, ioffset, dirext, direxts, indext, indexts;
        efs_block_t iblock, result = 0, lastblock = 0;
        efs_extent ext, *exts;

        last = in->lastextent;

        if (in->numextents <= EFS_DIRECTEXTENTS) {
                /* first check the extent we returned last time */
                if ((result = efs_extent_check(&in->extents[last], block, sb)))
                        return result;

                /* if there is only one extent, nothing else can match */
                if (in->numextents == 1) {
                        pr_err("%s() failed to map (1 extent)\n", __func__);
                        return 0;
                }

                direxts = in->numextents;

                /*
                 * check the remaining extents stored in the inode,
                 * starting with the next one and wrapping around
                 */
                for (dirext = 1; dirext < direxts; dirext++) {
                        cur = (last + dirext) % in->numextents;
                        if ((result = efs_extent_check(&in->extents[cur], block, sb))) {
                                in->lastextent = cur;
                                return result;
                        }
                }

                pr_err("%s() failed to map block %u (dir)\n", __func__, block);
                return 0;
        }

        pr_debug("%s(): indirect search for logical block %u\n",
                 __func__, block);

        /*
         * for an indirect file, ex_offset of the first inode-resident
         * (direct) extent holds the number of direct extents used to
         * store the indirect extent descriptors; numextents is the
         * total number of indirect extents to search
         */
        direxts = in->extents[0].cooked.ex_offset;
        indexts = in->numextents;

        for (indext = 0; indext < indexts; indext++) {
                cur = (last + indext) % indexts;

                /*
                 * work out which direct extent contains `cur', and
                 * compute ibase, the index of the first indirect extent
                 * covered by that direct extent
                 */
                ibase = 0;
                for (dirext = 0; cur < ibase && dirext < direxts; dirext++) {
                        ibase += in->extents[dirext].cooked.ex_length *
                                (EFS_BLOCKSIZE / sizeof(efs_extent));
                }

                if (dirext == direxts) {
                        /* should never happen */
                        pr_err("couldn't find direct extent for indirect extent %d (block %u)\n",
                               cur, block);
                        if (bh) brelse(bh);
                        return 0;
                }

                /* work out the block number and offset of this indirect extent */
                iblock = sb->fs_start + in->extents[dirext].cooked.ex_bn +
                        (cur - ibase) /
                        (EFS_BLOCKSIZE / sizeof(efs_extent));
                ioffset = (cur - ibase) %
                        (EFS_BLOCKSIZE / sizeof(efs_extent));

                /* only (re)read the indirect extent block when it changes */
                if (first || lastblock != iblock) {
                        if (bh) brelse(bh);

                        bh = sb_bread(inode->i_sb, iblock);
                        if (!bh) {
                                pr_err("%s() failed at block %d\n",
                                       __func__, iblock);
                                return 0;
                        }
                        pr_debug("%s(): read indirect extent block %d\n",
                                 __func__, iblock);
                        first = 0;
                        lastblock = iblock;
                }

                exts = (efs_extent *) bh->b_data;

                extent_copy(&(exts[ioffset]), &ext);

                if (ext.cooked.ex_magic != 0) {
                        pr_err("extent %d has bad magic number in block %d\n",
                               cur, iblock);
                        if (bh) brelse(bh);
                        return 0;
                }

                if ((result = efs_extent_check(&ext, block, sb))) {
                        if (bh) brelse(bh);
                        in->lastextent = cur;
                        return result;
                }
        }
        if (bh) brelse(bh);
        pr_err("%s() failed to map block %u (indir)\n", __func__, block);
        return 0;
}

MODULE_LICENSE("GPL");