/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

/* # of nid pages to be readahead when building free nids */
#define DEF_RA_NID_PAGES	0

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD	10
/* control the total number of cached nat entries (default: 100000) */
#define DEF_NAT_CACHE_THRESHOLD		100000

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE	64
#define SETVEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
};
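
/*
 * For node information
 */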
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	unsigned char mask = 0x01 << type;

	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	unsigned char mask = 0x01 << type;

	return ne->ni.flag & mask;
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}

enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	EXTENT_CACHE,	/* indicates extent cache */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number (NAT block offset) */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};
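
/*
 * For free nid management
 */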
enum nid_state {
	NID_NEW,	/* newly added to free nid list */
	NID_ALLOC	/* it is allocated */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: NID_NEW or NID_ALLOC */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->free_nid_list_lock);
	if (nm_i->fcnt <= 0) {
		spin_unlock(&nm_i->free_nid_list_lock);
		return;
	}
	fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->free_nid_list_lock);
}

static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(seg_off << sbi->log_blocks_per_seg << 1) +
		(block_off & (sbi->blocks_per_seg - 1)));

	/* each NAT block has two on-disk copies; nat_bitmap selects the current one */
	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	/* flip to the same NAT block in the other set */
	if ((block_addr >> sbi->log_blocks_per_seg) % 2)
		block_addr -= sbi->blocks_per_seg;
	else
		block_addr += sbi->blocks_per_seg;

	return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
}

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	return le32_to_cpu(rn->footer.next_blkaddr);
}

static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* should remain old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}

static inline void copy_node_footer(struct page *dst, struct page *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);

	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);
	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
	__u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);

	/* with CP_CRC_RECOVERY, the checkpoint CRC sits in the upper 32 bits */
	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
		__u64 crc = le32_to_cpu(*((__le32 *)
				((unsigned char *)ckpt + crc_offset)));
		cp_ver |= (crc << 32);
	}
	rn->footer.cp_ver = cpu_to_le64(cp_ver);
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline bool is_recoverable_dnode(struct page *page)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
	__u64 cp_ver = cur_cp_version(ckpt);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
		__u64 crc = le32_to_cpu(*((__le32 *)
				((unsigned char *)ckpt + crc_offset)));
		cp_ver |= (crc << 32);
	}
	return cpu_to_le64(cp_ver) == cpver_of_node(page);
}
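
/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 *    Inode block (0)
 *      |- direct node (1)
 *      |- direct node (2)
 *      |- indirect node (3)
 *      |            `- direct node (4 => 4 + N - 1)
 *      |- indirect node (4 + N)
 *      |            `- direct node (5 + N => 5 + 2N - 1)
 *      `- double indirect node (5 + 2N)
 *                   `- indirect node (6 + 2N)
 *                         `- direct node
 *                   ......
 *                   `- indirect node ((6 + 2N) + x(N + 1))
 *                         `- direct node
 *                   ......
 *                   `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                         `- direct node
 */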
static inline bool IS_DNODE(struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return false;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}

/* set the nid slot 'off' in node page 'p'; 'i' is true when 'p' is an inode page */
static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}
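
/*
 * Coldness identification:
 *  - Mark cold files in f2fs_inode_info
 *  - Mark cold node blocks in their node footer
 *  - Mark cold data pages in page cache
 */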
static inline int is_cold_data(struct page *page)
{
	return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
	ClearPageChecked(page);
}

static inline int is_node(struct page *page, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	return le32_to_cpu(rn->footer.flag) & (1 << type);
}

#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)

static inline int is_inline_node(struct page *page)
{
	return PageChecked(page);
}

static inline void set_inline_node(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_inline_node(struct page *page)
{
	ClearPageChecked(page);
}

static inline void set_cold_node(struct inode *inode, struct page *page)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (S_ISDIR(inode->i_mode))
		flag &= ~(0x1 << COLD_BIT_SHIFT);
	else
		flag |= (0x1 << COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (mark)
		flag |= (0x1 << type);
	else
		flag &= ~(0x1 << type);
	rn->footer.flag = cpu_to_le32(flag);
}
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)