#ifndef __INTERNAL_H
#define __INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/cleancache.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "erofs_fs.h"

/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

#define errln(x, ...) pr_err(x "\n", ##__VA_ARGS__)
#define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)

#define dbg_might_sleep might_sleep
#define DBG_BUGON BUG_ON
#else
#define debugln(x, ...) ((void)0)

#define dbg_might_sleep() ((void)0)
#define DBG_BUGON(x) ((void)(x))
#endif

enum {
	FAULT_KMALLOC,
	FAULT_MAX,
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
extern char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))

struct erofs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};
#endif

#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
#define EROFS_FS_ZIP_CACHE_LVL (2)
#elif defined(CONFIG_EROFS_FS_ZIP_CACHE_UNIPOLAR)
#define EROFS_FS_ZIP_CACHE_LVL (1)
#else
#define EROFS_FS_ZIP_CACHE_LVL (0)
#endif

#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
#define EROFS_FS_HAS_MANAGED_CACHE
#endif

/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;

struct erofs_sb_info {
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	u32 blocks;
	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
#endif

	/* inode slot unit size in bit shift */
	unsigned char islotbits;
#ifdef CONFIG_EROFS_FS_ZIP
	/* cluster size in bit shift */
	unsigned char clusterbits;

	/* the dedicated workstation for compression */
	struct radix_tree_root workstn_tree;

	/* threshold for decompression synchronously */
	unsigned int max_sync_decompress_pages;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct inode *managed_cache;
#endif

#endif

	u32 build_time_nsec;
	u64 build_time;

	/* what we really care is nid, rather than ino.. */
	erofs_nid_t root_nid;
	/* used for statfs, f_files = inos */
	u64 inos;

	u8 uuid[16];			/* 128-bit uuid for volume */
	u8 volume_name[16];		/* volume name */
	char *dev_name;

	unsigned int mount_opt;
	unsigned int shrinker_run_no;

#ifdef CONFIG_EROFS_FAULT_INJECTION
	struct erofs_fault_info fault_info;	/* For fault injection */
#endif
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
#define erofs_show_injection_info(type) \
	infoln("inject %s in %s of %pS", erofs_fault_name[type], \
	       __func__, __builtin_return_address(0))

static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
	struct erofs_fault_info *ffi = &sbi->fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
	return false;
}

static inline void erofs_show_injection_info(int type)
{
}
#endif

static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
				  size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		erofs_show_injection_info(FAULT_KMALLOC);
		return NULL;
	}
	return kmalloc(size, flags);
}
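
/*
 * Call-site sketch (illustrative only; `sbi', `size' and `ptr' are
 * hypothetical locals): allocating through erofs_kmalloc() lets
 * CONFIG_EROFS_FAULT_INJECTION force the error path once every
 * `inject_rate' operations, so callers must treat NULL as a normal
 * failure:
 *
 *	void *ptr = erofs_kmalloc(sbi, size, GFP_KERNEL);
 *
 *	if (!ptr)
 *		return -ENOMEM;
 */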

#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_FAULT_INJECTION	0x00000040

#define clear_opt(sbi, option)	((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(sbi, option)	((sbi)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(sbi, option)	((sbi)->mount_opt & EROFS_MOUNT_##option)
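
/*
 * Usage sketch (illustrative; mirrors a typical mount-time parser):
 * the helpers above take the option name with the EROFS_MOUNT_ prefix
 * dropped.
 *
 *	set_opt(sbi, XATTR_USER);	(sets EROFS_MOUNT_XATTR_USER)
 *	if (test_opt(sbi, POSIX_ACL))
 *		sb->s_flags |= SB_POSIXACL;
 */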

#ifdef CONFIG_EROFS_FS_ZIP
#define erofs_workstn_lock(sbi) xa_lock(&(sbi)->workstn_tree)
#define erofs_workstn_unlock(sbi) xa_unlock(&(sbi)->workstn_tree)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	/* the workgroup index in the workstation */
	pgoff_t index;

	/* overall workgroup reference count */
	atomic_t refcount;
};

#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)

#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	/*
	 * other observers should notice all modifications
	 * in the freezing period.
	 */
	smp_mb();
	atomic_set(&grp->refcount, orig_val);
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	return atomic_cond_read_relaxed(&grp->refcount,
					VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	/* no need to spin on UP platforms, let's just disable preemption */
	if (val != atomic_read(&grp->refcount)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	int v = atomic_read(&grp->refcount);

	/* workgroup is never freezed on uniprocessor systems */
	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
	return v;
}
#endif
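
/*
 * Freeze pattern sketch (illustrative): a caller that must modify a
 * workgroup exclusively snapshots the refcount, freezes on it, and
 * restores it afterwards; concurrent lookups spin in
 * erofs_wait_on_workgroup_freezed() while the refcount reads
 * EROFS_LOCKED_MAGIC.
 *
 *	int refs = atomic_read(&grp->refcount);
 *
 *	if (erofs_workgroup_try_to_freeze(grp, refs)) {
 *		... exclusive work on grp ...
 *		erofs_workgroup_unfreeze(grp, refs);
 *	}
 */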

int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index, bool *tag);
int erofs_register_workgroup(struct super_block *sb,
			     struct erofs_workgroup *grp, bool tag);
unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
				       unsigned long nr_shrink, bool cleanup);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp);
int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page);

#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
#else
#define MNGD_MAPPING(sbi) (NULL)
#endif

#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES	3

static inline bool __should_decompress_synchronously(struct erofs_sb_info *sbi,
						     unsigned int nr)
{
	return nr <= sbi->max_sync_decompress_pages;
}
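
/*
 * Decision sketch (illustrative; `nr_pages' is a hypothetical readahead
 * batch size): small batches are decompressed in the caller's context,
 * larger ones are deferred to asynchronous handling.
 *
 *	bool sync = __should_decompress_synchronously(sbi, nr_pages);
 */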

int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
#else
/* !CONFIG_EROFS_FS_ZIP: dummy initializers */
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
#endif

/* we strictly follow PAGE_SIZE and no buffer head yet */
#define LOG_BLOCK_SIZE PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK (1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif

#define ROOT_NID(sb) ((sb)->root_nid)

#ifdef CONFIG_EROFS_FS_ZIP
/* hard limit of pages per compressed cluster */
#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)

/* page count of a compressed cluster */
#define erofs_clusterpages(sbi) ((1 << (sbi)->clusterbits) / PAGE_SIZE)
#endif

typedef u64 erofs_off_t;

/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;

#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)

static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
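
/*
 * Worked example (numbers are illustrative): with 4KiB blocks,
 * meta_blkaddr == 2 and islotbits == 5 (32-byte inode slots), nid 100
 * is located at
 *
 *	iloc = 2 * 4096 + (100 << 5) = 11392
 *
 * which callers typically split back into a block number and an
 * in-block offset:
 *
 *	erofs_blk_t blk = erofs_blknr(iloc(sbi, nid));	 11392 / 4096 == 2
 *	unsigned int ofs = erofs_blkoff(iloc(sbi, nid)); 11392 % 4096 == 3200
 */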

/* atomic flag definitions */
#define EROFS_V_EA_INITED_BIT 0

/* bitlock definitions (arranged in reverse order) */
#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1)

struct erofs_vnode {
	erofs_nid_t nid;

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char data_mapping_mode;
	unsigned char inode_isize;
	unsigned short xattr_isize;

	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	erofs_blk_t raw_blkaddr;

	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_V(ptr)	\
	container_of(ptr, struct erofs_vnode, vfs_inode)

#define __inode_advise(x, bit, bits) \
	(((x) >> (bit)) & ((1 << (bits)) - 1))

#define __inode_version(advise)	\
	__inode_advise(advise, EROFS_I_VERSION_BIT,	\
		EROFS_I_VERSION_BITS)

#define __inode_data_mapping(advise)	\
	__inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
		EROFS_I_DATA_MAPPING_BITS)
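
/*
 * Usage sketch (illustrative; `dic' stands for a hypothetical pointer
 * to the on-disk inode, and the EROFS_INODE_LAYOUT_* version values are
 * assumed from erofs_fs.h): i_advise packs the format version and the
 * data mapping mode, which the macros above shift out and mask:
 *
 *	unsigned int advise = le16_to_cpu(dic->i_advise);
 *
 *	vi->data_mapping_mode = __inode_data_mapping(advise);
 *	if (__inode_version(advise) != EROFS_INODE_LAYOUT_V1)
 *		...
 */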

static inline unsigned long inode_datablocks(struct inode *inode)
{
	/* since i_size cannot be changed */
	return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline bool is_inode_layout_plain(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_PLAIN;
}

static inline bool is_inode_layout_compression(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode ==
		EROFS_INODE_LAYOUT_COMPRESSION;
}

static inline bool is_inode_layout_inline(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_INLINE;
}

extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif

enum {
	BH_Zipped = BH_PrivateStart,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	(1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		(1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED	(1 << BH_Zipped)

struct erofs_map_blocks {
	erofs_off_t m_pa, m_la;
	u64 m_plen, m_llen;

	unsigned int m_flags;

	struct page *mpage;
};

/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW	0x0001

#ifdef CONFIG_EROFS_FS_ZIP
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags);
#else
static inline int z_erofs_map_blocks_iter(struct inode *inode,
					  struct erofs_map_blocks *map,
					  int flags)
{
	return -ENOTSUPP;
}
#endif
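
/*
 * Usage sketch (illustrative): resolve the physical extent backing a
 * logical offset; erofs_map_blocks() itself is declared further below.
 *
 *	struct erofs_map_blocks map = { .m_la = pos, .mpage = NULL };
 *	int err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 *
 *	if (!err && (map.m_flags & EROFS_MAP_MAPPED))
 *		... m_plen bytes start at physical address m_pa ...
 */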

/* data.c */
static inline struct bio *
erofs_grab_bio(struct super_block *sb,
	       erofs_blk_t blkaddr, unsigned int nr_pages,
	       bio_end_io_t endio, bool nofail)
{
	const gfp_t gfp = GFP_NOIO;
	struct bio *bio;

	do {
		if (nr_pages == 1) {
			bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
			if (unlikely(bio == NULL)) {
				DBG_BUGON(nofail);
				return ERR_PTR(-ENOMEM);
			}
			break;
		}
		bio = bio_alloc(gfp, nr_pages);
		nr_pages /= 2;
	} while (unlikely(bio == NULL));

	bio->bi_end_io = endio;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	return bio;
}

static inline void __submit_bio(struct bio *bio, unsigned int op,
				unsigned int op_flags)
{
	bio_set_op_attrs(bio, op, op_flags);
	submit_bio(bio);
}
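
/*
 * Read-path sketch (illustrative; error handling trimmed, `read_endio'
 * stands for any bio_end_io_t completion handler): build a one-page
 * read bio at `blkaddr' and submit it.
 *
 *	bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, false);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
 *		goto err_out;
 *	__submit_bio(bio, REQ_OP_READ, 0);
 */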

#ifndef CONFIG_EROFS_FS_IO_MAX_RETRIES
#define EROFS_IO_MAX_RETRIES_NOFAIL 0
#else
#define EROFS_IO_MAX_RETRIES_NOFAIL CONFIG_EROFS_FS_IO_MAX_RETRIES
#endif

struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr,
				   bool prio, bool nofail);

static inline struct page *erofs_get_meta_page(struct super_block *sb,
					       erofs_blk_t blkaddr, bool prio)
{
	return __erofs_get_meta_page(sb, blkaddr, prio, false);
}

static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
						      erofs_blk_t blkaddr,
						      bool prio)
{
	return __erofs_get_meta_page(sb, blkaddr, prio, true);
}

int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags);

static inline struct page *erofs_get_inline_page(struct inode *inode,
						 erofs_blk_t blkaddr)
{
	return erofs_get_meta_page(inode->i_sb,
				   blkaddr, S_ISDIR(inode->i_mode));
}
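
/*
 * Usage sketch (illustrative; assumes, as callers in this tree treat it,
 * that the helper returns a locked page holding an extra reference):
 *
 *	struct page *page = erofs_get_meta_page(sb, blkaddr, false);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	data = kmap_atomic(page);
 *	... parse on-disk metadata at data + erofs_blkoff(offset) ...
 *	kunmap_atomic(data);
 *	unlock_page(page);
 *	put_page(page);
 */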

/* inode.c */
static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
{
#if BITS_PER_LONG == 32
	return (nid >> 32) ^ (nid & 0xffffffff);
#else
	return nid;
#endif
}

extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;

static inline void set_inode_fast_symlink(struct inode *inode)
{
	inode->i_op = &erofs_fast_symlink_iops;
}

static inline bool is_inode_fast_symlink(struct inode *inode)
{
	return inode->i_op == &erofs_fast_symlink_iops;
}

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);

/* namei.c */
extern const struct inode_operations erofs_dir_iops;

int erofs_namei(struct inode *dir, struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);

/* dir.c */
extern const struct file_operations erofs_dir_fops;
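
/*
 * Lookup sketch (illustrative; assumes the EROFS_FT_* dirent types from
 * erofs_fs.h): resolve a name in `dir' to an nid, then grab the inode.
 *
 *	erofs_nid_t nid;
 *	unsigned int d_type;
 *	int err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
 *
 *	if (!err)
 *		inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
 */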

static inline void *erofs_vmap(struct page **pages, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
	int i = 0;

	while (1) {
		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

		/* retry two more times (totally 3 times) */
		if (addr != NULL || ++i >= 3)
			return addr;
		vm_unmap_aliases();
	}
#else
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
#endif
}

static inline void erofs_vunmap(const void *mem, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
	vm_unmap_ram(mem, count);
#else
	vunmap(mem);
#endif
}
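
/*
 * Usage sketch (illustrative): map a gathered page array contiguously
 * for decompression and tear the mapping down with the same count.
 *
 *	void *vaddr = erofs_vmap(pages, nr_pages);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... use nr_pages * PAGE_SIZE bytes at vaddr ...
 *	erofs_vunmap(vaddr, nr_pages);
 */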

/* utils.c */
extern struct shrinker erofs_shrinker_info;

struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);

void erofs_register_super(struct super_block *sb);
void erofs_unregister_super(struct super_block *sb);

#ifndef lru_to_page
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
#endif

#endif /* __INTERNAL_H */