/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DEFINE_STATIC_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	/*
	 * Defer the actual free through an RCU grace period, so that
	 * lockless readers of the descriptor tables can still safely
	 * dereference this struct file in the meantime.
	 */
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
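
/*
 * Illustrative note (not from this file): proc_nr_files() backs the
 * fs.file-nr sysctl, which reports three values - the number of
 * allocated file objects, the number of allocated-but-unused ones
 * (always 0 since the free list was removed), and the limit.
 * Sample output, numbers made up for illustration:
 *
 *	$ cat /proc/sys/fs/file-nr
 *	3616	0	101663
 */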

/*
 * Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * The returned file holds a reference on current's credentials and
 * counts against the file-max limit; callers that fail to install it
 * must drop it with put_filp(), not fput().
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}
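
/*
 * Illustrative only (not part of this file): a caller that cannot
 * finish installing the result must unwind with put_filp(), because
 * no dentry/mnt references exist yet.  A minimal sketch, with
 * setup_failed standing in for any caller-specific error check:
 *
 *	struct file *f = get_empty_filp();
 *	if (!f)
 *		return -ENFILE;
 *	if (setup_failed) {
 *		put_filp(f);
 *		return -EINVAL;
 *	}
 */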

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair the new file should point at
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() when you already have a fully
 * set up path.  On success, ownership of the caller's references on
 * @path passes to the new file; they are dropped in __fput().
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	file->f_path = *path;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);
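
/*
 * Illustrative only (not part of this file): pipe-like and
 * anon-inode-style filesystems build a path first and unwind with
 * path_put() if allocation fails.  my_mnt and my_fops are stand-in
 * names, not real identifiers:
 *
 *	struct path path;
 *	struct file *file;
 *
 *	path.dentry = dentry;		// set up by the caller beforehand
 *	path.mnt = mntget(my_mnt);
 *	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &my_fops);
 *	if (!file)
 *		path_put(&path);
 */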

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we give up write access
 *
 * This undoes what was set up at open time: the inode's write count
 * and, for non-special files that are still marked writeable, the
 * mount write count taken in alloc_file().
 */
static void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	__mnt_drop_write(mnt);
	file_release_write(file);
}

/* the real guts of fput() - releasing the last reference to a file */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	ima_file_free(file);
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

/*
 * fput() calls from interrupt or kernel-thread context cannot run
 * __fput() directly (it may sleep), so the file is queued here and
 * released later from a workqueue.
 */
static DEFINE_SPINLOCK(delayed_fput_lock);
static LIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	LIST_HEAD(head);
	spin_lock_irq(&delayed_fput_lock);
	list_splice_init(&delayed_fput_list, &head);
	spin_unlock_irq(&delayed_fput_lock);
	while (!list_empty(&head)) {
		struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
		list_del_init(&f->f_u.fu_list);
		__fput(f);
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs to have the final fput() it has done
 * complete, call this.  The only user right now is the boot code - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct file waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
			/* task_work won't run here; defer to the workqueue */
			unsigned long flags;
			spin_lock_irqsave(&delayed_fput_lock, flags);
			list_add(&file->f_u.fu_list, &delayed_fput_list);
			schedule_work(&delayed_fput_work);
			spin_unlock_irqrestore(&delayed_fput_lock, flags);
			return;
		}
		init_task_work(&file->f_u.fu_rcuhead, ____fput);
		task_work_add(task, &file->f_u.fu_rcuhead, true);
	}
}
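
/*
 * Illustrative only (not part of this file): the usual pairing with
 * fget() in process context, where the final reference drop above is
 * deferred to task_work and runs before the return to userspace:
 *
 *	struct file *f = fget(fd);
 *	if (!f)
 *		return -EBADF;
 *	... operate on f ...
 *	fput(f);
 */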

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks).
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		file_sb_list_del(file);
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

/*
 * put_filp() is for files that never became fully live: there is no
 * dentry/mnt to drop and no ->release() to call, only the security
 * blob, the sb list entry and the object itself.
 */
void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(&files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(&files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock.
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
	}
}
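
/*
 * Illustrative only (not part of this file): the open path puts each
 * file on its superblock's list once it is bound to an inode, so that
 * mark_files_ro() below can find it; the matching removal happens in
 * fput()/put_filp():
 *
 *	file_sb_list_add(file, inode->i_sb);
 */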

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	int i;							\
	for_each_possible_cpu(i) {				\
		struct list_head *list;				\
		list = per_cpu_ptr((__sb)->s_files, i);		\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
	}							\
}

#else

#define do_file_list_for_each_entry(__sb, __file)		\
{								\
	struct list_head *list;					\
	list = &(__sb)->s_files;				\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry				\
}

#endif

/**
 * mark_files_ro - mark all files on a superblock read-only
 * @sb: superblock in question
 *
 * Used by the emergency-remount path: every open, writeable regular
 * file on @sb loses FMODE_WRITE and drops its mount write reference.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

	lg_global_lock(&files_lglock);
	do_file_list_for_each_entry(sb, f) {
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt_drop_write_file(f);
	} while_file_list_for_each_entry;
	lg_global_unlock(&files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */
	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	files_defer_init();
	lg_lock_init(&files_lglock, "files_lglock");
	percpu_counter_init(&nr_files, 0);
}
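
/*
 * Worked example (illustrative): with 4 KiB pages, PAGE_SIZE / 1024 = 4,
 * so on a 1 GiB machine (mempages = 262144) the default limit is
 * n = (262144 * 4) / 10 = 104857 files - roughly one file per 10 KiB
 * of RAM - which comfortably exceeds the NR_FILE (8192) floor.
 */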