/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato.
 */
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


/*
 * Minimum and maximum valid checkpoint numbers.  A newly allocated DAT
 * entry is initialized to the full range; its real lifetime is narrowed
 * by nilfs_dat_commit_start() and nilfs_dat_commit_end() below.
 */
#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}
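
/*
 * DAT entries are changed with a prepare/commit/abort protocol: prepare
 * pins the on-disk entry in memory, commit applies the change and marks
 * the metadata file dirty, and abort backs out without touching the
 * entry.  A minimal usage sketch for the allocation functions below
 * (hypothetical caller; "failed" stands for any error in the caller's
 * own intermediate steps):
 *
 *	struct nilfs_palloc_req req;
 *	int err;
 *
 *	err = nilfs_dat_prepare_alloc(dat, &req);
 *	if (err)
 *		return err;
 *	...
 *	if (failed)
 *		nilfs_dat_abort_alloc(dat, &req);
 *	else
 *		nilfs_dat_commit_alloc(dat, &req);
 */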

int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
}

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}
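
/*
 * Lifetime bookkeeping: de_start and de_end bracket the half-open range
 * [de_start, de_end) of checkpoint numbers during which the virtual
 * block number maps to de_blocknr; de_blocknr == 0 means the block has
 * not been written to disk.  A worked example: a block first written at
 * checkpoint 5 and deleted while checkpoint 12 is being built ends up
 * with de_start = 5 and de_end = 12, so the mapping is valid for
 * checkpoints 5..11 and invalid from checkpoint 12 onward.
 */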

int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
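
/*
 * An update atomically ends the lifetime of one virtual block number
 * and allocates a fresh one.  Sketch of a hypothetical caller (names
 * are illustrative; prepare_update fills in newreq->pr_entry_nr):
 *
 *	struct nilfs_palloc_req oldreq, newreq;
 *	int err;
 *
 *	oldreq.pr_entry_nr = old_vblocknr;
 *	err = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
 *	if (err)
 *		return err;
 *	...
 *	nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
 */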

/**
 * nilfs_dat_mark_dirty - mark the DAT entry for a virtual block dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}
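
/*
 * Relocating a live block only rewrites the mapping held in the DAT,
 * so the virtual block number seen by the rest of the file system
 * stays valid.  A hypothetical call site (sketch only):
 *
 *	err = nilfs_dat_move(dat, vblocknr, new_blocknr);
 *	if (err)
 *		goto failed;
 */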

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number
 * @vblocknr to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	if (blocknrp != NULL)
		*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}
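
/*
 * Lookups of virtualized block addresses resolve through the function
 * above.  A minimal usage sketch (hypothetical caller):
 *
 *	sector_t pbn;
 *	int err;
 *
 *	err = nilfs_dat_translate(dat, vblocknr, &pbn);
 *	if (!err)
 *		... read the block at pbn ...
 *	else if (err == -ENOENT)
 *		... the virtual block has no disk block yet ...
 */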

ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* first and last virtual block numbers in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		/* copy out all requests that fall into this block */
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}

	return nvi;
}