/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a real pain. We couldn't use fork() in 1.0.
 */

#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

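/*
 * Sum the per-CPU fork counters to report how many processes exist on the
 * system.  The counters are read without locking, so on a busy SMP machine
 * the result is only approximate.
 */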
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

void __weak arch_release_thread_stack(unsigned long *stack)
{
}

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

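/*
 * CPU-hotplug teardown callback: drop any vmalloc'ed stacks still parked
 * in the per-CPU cache of a CPU that is going away, so their memory is
 * not leaked.
 */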
static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
#endif

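/*
 * Allocate a kernel stack for a new task.  With CONFIG_VMAP_STACK the
 * stack lives in vmalloc space: the small per-CPU cache of recently freed
 * stacks is tried first, and only on a miss do we pay for a fresh
 * __vmalloc_node_range() call.  Without VMAP_STACK the stack is a plain
 * high-order page allocation on the requested node.
 */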
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

#ifdef CONFIG_DEBUG_KMEMLEAK
		/* Clear stale pointers from reused stack. */
		memset(s->addr, 0, THREAD_SIZE);
#endif
		tsk->stack_vm_area = s;
		return s->addr;
	}

	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
#endif
}

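/*
 * Release a task's stack.  With VMAP_STACK, first try to park it in a
 * free slot of the per-CPU stack cache; failing that, vfree it.
 * vfree_atomic() is used because this can run from interrupt context.
 * Non-vmapped stacks simply go back to the page allocator.
 */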
static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	if (task_stack_vm_area(tsk)) {
		int i;

		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_cmpxchg(cached_stacks[i],
					NULL, tsk->stack_vm_area) != NULL)
				continue;

			return;
		}

		vfree_atomic(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

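/*
 * Account (account = 1) or unaccount (account = -1) a task's kernel stack
 * against the NR_KERNEL_STACK_KB zone counter and the kernel-stack memcg
 * statistic, handling both vmapped and page-allocated stacks.
 */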
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			mod_zone_page_state(page_zone(vm->pages[i]),
					    NR_KERNEL_STACK_KB,
					    PAGE_SIZE / 1024 * account);
		}

		/* All stack pages belong to the same memcg. */
		mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
				     account * (THREAD_SIZE / 1024));
	} else {
		/*
		 * All stack pages are in the same zone and belong to the
		 * same memcg.
		 */
		struct page *first_page = virt_to_page(stack);

		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
				    THREAD_SIZE / 1024 * account);

		mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
				     account * (THREAD_SIZE / 1024));
	}
}

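/*
 * Free the stack of a task that is known to be dead, after removing its
 * pages from the kernel-stack accounting.
 */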
static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	arch_release_thread_stack(tsk->stack);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	cgroup_free(tsk);
	task_numa_free(tsk);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);
void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

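/*
 * Called early during boot: create the task_struct slab cache, let the
 * architecture initialize its own task cache, derive the default thread
 * limit from available RAM and seed init's RLIMIT_NPROC from it.
 */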
void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);

	/* create a slab on which task_structs can be allocated */
	task_struct_cachep = kmem_cache_create("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++) {
		init_user_ns.ucount_max[i] = max_threads/2;
	}

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	lockdep_init_task(&init_task);
}

int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}

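/*
 * Duplicate the parent's task_struct for a new child: allocate the struct
 * and a fresh kernel stack on the requested NUMA node, copy the parent's
 * contents across, then initialize the per-task state that must not be
 * inherited (stack canary, reference counts, kcov state, ...).
 */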
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	atomic_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;

	account_kernel_stack(tsk, 1);

	kcov_task_init(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
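/*
 * Duplicate the parent's address space into the child: copy every VMA
 * (honoring VM_DONTCOPY and VM_WIPEONFORK), link the copies into the new
 * mm's list, rbtree and interval trees, and copy the page table entries.
 * Both mmap_sems are held for writing while this runs.
 */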
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);

	uprobe_start_dup_mmap();
	if (down_write_killable(&oldmm->mmap_sem)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/* VM_WIPEONFORK gets a clean slate in the child. */
			tmp->anon_vma = NULL;
			if (anon_vma_prepare(tmp))
				goto fail_nomem_anon_vma_fork;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	up_write(&oldmm->mmap_sem);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

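/*
 * Initialize a freshly allocated (or parent-copied) mm_struct: reference
 * counts, locks, AIO and uprobes state, the coredump/dumpable flags, and
 * finally the page-global directory and the architecture MMU context.
 * On failure the partially set up mm is freed and NULL is returned.
 */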
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	mm_nr_pmds_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_mm_init(mm);
	hmm_mm_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

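/*
 * Sanity-check an mm that is about to be freed: any leaked RSS, PTE or
 * PMD counters indicate a bug, so report them via printk.
 */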
static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

	if (atomic_long_read(&mm->nr_ptes))
		pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
				atomic_long_read(&mm->nr_ptes));
	if (mm_nr_pmds(mm))
		pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
				mm_nr_pmds(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	hmm_mm_destroy(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve the task is
 * single-threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file)
		get_file(new_exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file)
		fput(old_exe_file);
}

/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}
EXPORT_SYMBOL(get_mm_exe_file);

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}
EXPORT_SYMBOL(get_task_exe_file);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			mmget(mm);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

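/*
 * Like get_task_mm(), but for accessing another task's address space on
 * its behalf (e.g. from procfs): unless the mm is the caller's own, the
 * caller must pass a ptrace access check for @mode.  cred_guard_mutex is
 * taken to serialize against a concurrent exec changing the task's
 * credentials.
 */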
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}

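/*
 * vfork() support: the parent blocks on a completion that the child
 * fires from mm_release() once it has released the shared mm (on exec
 * or exit).  If the parent is killed while waiting, it clears
 * child->vfork_done under the task lock so the child will not complete
 * a stale completion.
 */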
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	killed = wait_for_completion_killable(vfork);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk, mm->user_ns))
		goto fail_nomem;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	/* initialize the new vmacache entries */
	vmacache_flush(tsk);

	if (clone_flags & CLONE_VM) {
		mmget(oldmm);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;
	struct io_context *new_ioc;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		ioc_task_link(ioc);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
		if (unlikely(!new_ioc))
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
		put_io_context(new_ioc);
	}
#endif
	return 0;
}

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;

	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		/*
		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
		 * without an RCU grace period. See __lock_task_sighand().
		 */
		kmem_cache_free(sighand_cachep, sighand);
	}
}

#ifdef CONFIG_POSIX_TIMERS
/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
		sig->cputimer.running = true;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
#else
static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
#endif

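/*
 * Set up the child's signal_struct.  Threads created with CLONE_THREAD
 * share the group leader's signal_struct, so there is nothing to do;
 * otherwise allocate a fresh one, make the child its only member and
 * inherit rlimits, tty and OOM settings from the parent.
 */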
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	seqlock_init(&sig->stats_lock);
	prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
	INIT_LIST_HEAD(&sig->posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;
#endif

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}

static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying thread
	 * flags and before holding sighand lock, we have to
	 * manually enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_tsk_thread_flag(p, TIF_SECCOMP);
#endif
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	p->pi_waiters = RB_ROOT_CACHED;
	p->pi_top_task = NULL;
	p->pi_blocked_on = NULL;
#endif
}

#ifdef CONFIG_POSIX_TIMERS
/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = 0;
	tsk->cputime_expires.virt_exp = 0;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
#else
static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
#endif

static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	task->pids[type].pid = pid;
}

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static __latent_entropy struct task_struct *copy_process(
					unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace,
					unsigned long tls,
					int node)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group with the forking task.
	 */
	if (clone_flags & CLONE_THREAD) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) !=
				current->nsproxy->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	retval = -ENOMEM;
	p = dup_task_struct(current, node);
	if (!p)
		goto fork_out;

	/*
	 * This _must_ happen before we call free_task(), i.e. before we jump
	 * to any of the bad_fork_* labels. This is to avoid freeing
	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
	 * kernel threads (see kthread.c).
	 */
	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;
	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	p->utimescaled = p->stimescaled = 0;
#endif
	prev_cputime_init(&p->prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_init(&p->vtime.seqcount);
	p->vtime.starttime = 0;
	p->vtime.state = VTIME_INACTIVE;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->start_time = ktime_get_ns();
	p->real_start_time = ktime_get_boot_ns();
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_threadgroup_lock;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif

	p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
	lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = security_task_alloc(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_security;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_thread;
		}
	}

#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		sas_ss_reset(p);

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->exit_signal = -1;
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		if (clone_flags & CLONE_PARENT)
			p->exit_signal = current->group_leader->exit_signal;
		else
			p->exit_signal = (clone_flags & CSIGNAL);
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;

	cgroup_threadgroup_change_begin(current);
	/*
	 * Ensure that the cgroup subsystem policies allow the new process to
	 * be forked. Note that the new process's css_set can be changed
	 * between here and cgroup_post_fork() if an organisation operation
	 * is in progress.
	 */
	retval = cgroup_can_fork(p);
	if (retval)
		goto bad_fork_free_pid;

	/*
	 * Make it visible to the rest of the system, but dont wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	klp_copy_process(p);

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		retval = -ERESTARTNOINTR;
		goto bad_fork_cancel_cgroup;
	}
	if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
		retval = -ENOMEM;
		goto bad_fork_cancel_cgroup;
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	cgroup_threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	return p;

bad_fork_cancel_cgroup:
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p);
bad_fork_free_pid:
	cgroup_threadgroup_change_end(current);
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	p->state = TASK_DEAD;
	put_task_stack(p);
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node);
		links[type].pid = &init_struct_pid;
	}
}

struct task_struct *fork_idle(int cpu)
{
	struct task_struct *task;
	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
			    cpu_to_node(cpu));
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long _do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr,
	      unsigned long tls)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
	add_latent_entropy();
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef CONFIG_HAVE_COPY_THREAD_TLS
/* For compatibility with architectures that call do_fork directly rather than
 * using the syscall entry points below. */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	return _do_fork(clone_flags, stack_start, stack_size,
			parent_tidptr, child_tidptr, 0);
}
#endif

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
		(unsigned long)arg, NULL, NULL, 0);
}

#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
#else
	/* can not support in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
			0, NULL, NULL, 0);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
#endif
{
	return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
}
#endif

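/*
 * Depth-first walk of the process tree below @top (which is normalized
 * to its thread group leader), invoking @visitor for every child.  A
 * positive return value from the visitor descends into that child's
 * subtree, a negative value aborts the walk, and zero continues with
 * the next sibling.
 */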
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;
				leader = child;
				goto down;
			}
up:
			;
		}
	}

	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto up;
	}
out:
	read_unlock(&tasklist_lock);
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

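/*
 * Create the slab caches for the structures that get copied or shared at
 * fork time: signal/sighand, files, fs and mm structs, plus the VMA cache.
 * sighand_cachep is SLAB_TYPESAFE_BY_RCU, so it gets a constructor that
 * initializes the lock and waitqueue once per slab object.
 */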
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPU's we can ever have.  The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
	mmap_init();
	nsproxy_cache_init();
}

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare.  Note that unsharing the address space or the
	 * signal handlers also need to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (atomic_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing a signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}

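/*
 * sysctl handler for kernel.threads-max: accept the value written by
 * userspace, but clamp it between MIN_THREADS and MAX_THREADS via
 * set_max_threads().
 */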
int sysctl_max_threads(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = MIN_THREADS;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_max_threads(threads);

	return 0;
}