// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, const struct path *path)
{
	struct path old_root;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

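/*
 * Swap *p over to *new if it currently matches *old.  Returns 1 when a
 * replacement was made, so the caller can fix up the path reference
 * counts, and 0 otherwise.
 */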
static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
{
	if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
		return 0;
	*p = *new;
	return 1;
}

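/*
 * Walk all tasks and move any fs_struct root/pwd that still points at
 * old_root over to new_root, transferring the path references.
 */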
void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			int hits = 0;
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			hits += replace_path(&fs->root, old_root, new_root);
			hits += replace_path(&fs->pwd, old_root, new_root);
			write_seqcount_end(&fs->seq);
			while (hits--) {
				count++;
				path_get(new_root);
			}
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	while (count--)
		path_put(old_root);
}

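/* Drop the root and pwd references and free the fs_struct itself. */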
void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

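/*
 * Detach the fs_struct from an exiting task; free it once the last
 * user is gone.
 */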
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		tsk->fs = NULL;
		kill = !--fs->users;
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

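/*
 * Allocate a new fs_struct that duplicates @old, taking its own
 * references on root and pwd.  Returns NULL on allocation failure.
 */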
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);

	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_spinlock_init(&fs->seq, &fs->lock);
		fs->umask = old->umask;

		spin_lock(&old->lock);
		fs->root = old->root;
		path_get(&fs->root);
		fs->pwd = old->pwd;
		path_get(&fs->pwd);
		spin_unlock(&old->lock);
	}
	return fs;
}

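/*
 * Give the current task a private copy of its fs_struct, dropping its
 * reference on the old one (and freeing it if that was the last user).
 */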
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

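/* Return the current task's file mode creation mask. */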
int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
	.umask		= 0022,
};