/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a real mess. A "fork()" might well be considered a
 * nightmare in every respect.
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linus processes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
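
/*
 * Count how many processes there are on the system: the per-cpu
 * counts are summed over all possible CPUs.
 */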
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct_node(node)		\
		kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
# define free_task_struct(tsk)			\
		kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
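
/*
 * Charge (account == 1) or uncharge (account == -1) one kernel stack
 * against the NR_KERNEL_STACK counter of the zone backing it.
 */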
static void account_kernel_stack(struct thread_info *ti, int account)
{
	struct zone *zone = page_zone(virt_to_page(ti));

	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}

void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	account_kernel_stack(tsk->stack, -1);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}
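
/*
 * Final cleanup, called once the last reference to the task_struct is
 * dropped; the task must already be dead and must not be current.
 */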
void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

/*
 * macro override instead of weak attribute alias, to workaround
 * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
 */
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif
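
/*
 * Boot-time setup: create the task_struct slab cache and derive the
 * default max_threads limit from the amount of memory available.
 */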
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}
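
/*
 * Allocate a fresh task_struct and kernel stack and copy the parent's
 * contents into them; the new task starts with a usage count of two.
 */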
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err)
		goto out;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}

#ifdef CONFIG_MMU
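/*
 * Duplicate the parent's address space: walk its VMA list, copying each
 * VMA (and its page table entries) into the child mm in the same order.
 */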
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			mutex_lock(&mapping->i_mmap_mutex);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			mutex_unlock(&mapping->i_mmap_mutex);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(pol);
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}
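
/*
 * Set up a bare mm_struct: reference counts, locks, list heads and the
 * page directory. Returns the mm on success; frees it and returns NULL
 * on failure.
 */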
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ?
		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
	mm->core_state = NULL;
	mm->nr_ptes = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	atomic_set(&mm->oom_disable_count, 0);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	mm_init_cpumask(mm);
	return mm_init(mm, current);
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_mm_destroy(mm);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(mm->pmd_huge_pte);
#endif
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		ksm_exit(mm);
		khugepaged_exit(mm); /* must run before exit_mmap */
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		if (mm->binfmt)
			module_put(mm->binfmt->module);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/*
 * We added or removed a vma mapping the executable. The vmas are only mapped
 * during exec and are not mapped with the mmap system call.
 * Callers must hold down_write() on the mm's mmap_sem for these
 */
void added_exe_file_vma(struct mm_struct *mm)
{
	mm->num_exe_file_vmas++;
}

void removed_exe_file_vma(struct mm_struct *mm)
{
	mm->num_exe_file_vmas--;
	if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
		fput(mm->exe_file);
		mm->exe_file = NULL;
	}
}

void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	if (new_exe_file)
		get_file(new_exe_file);
	if (mm->exe_file)
		fput(mm->exe_file);
	mm->exe_file = new_exe_file;
	mm->num_exe_file_vmas = 0;
}

struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	/* We need mmap_sem to protect against races with removal of
	 * VM_EXECUTABLE vmas */
	down_read(&mm->mmap_sem);
	exe_file = mm->exe_file;
	if (exe_file)
		get_file(exe_file);
	up_read(&mm->mmap_sem);
	return exe_file;
}
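
/*
 * The child mm starts out with a reference to the same executable file
 * as the parent; called while the new mm is still private to fork.
 */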
static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	newmm->exe_file = get_mm_exe_file(oldmm);
}

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
#endif

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble, say, a killed vfork parent shouldn't touch this mm.
	 * Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->flags & PF_SIGNALED) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0);
		}
		tsk->clear_child_tid = NULL;
	}
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));
	mm_init_cpumask(mm);

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	mm->pmd_huge_pte = NULL;
#endif

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * structure, so we have to free it by hand.
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;
	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
		atomic_inc(&mm->oom_disable_count);

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}
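
/*
 * Share or copy the parent's fs_struct (root, cwd, umask) depending on
 * CLONE_FS.
 */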
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;
	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}
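
/*
 * Share or duplicate the parent's file descriptor table depending on
 * CLONE_FILES.
 */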
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}
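
/*
 * Set up the child's block I/O context: shared for CLONE_IO, otherwise
 * a new context inheriting only the parent's I/O priority.
 */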
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		tsk->io_context = ioc_task_link(ioc);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;

		tsk->io_context->ioprio = ioc->ioprio;
	}
#endif
	return 0;
}
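
/*
 * Share the parent's signal handler table for CLONE_SIGHAND, otherwise
 * give the child its own copy.
 */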
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	unsigned long cpu_limit;

	/* Thread group counters. */
	thread_group_cputime_init(sig);

	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (cpu_limit != RLIM_INFINITY) {
		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
		sig->cputimer.running = 1;
	}

	/* The timer lists. */
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
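
/*
 * Allocate and initialize a signal_struct for the child; threads in the
 * same group (CLONE_THREAD) keep sharing the parent's.
 */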
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->threadgroup_fork_lock);
#endif

	sig->oom_adj = current->signal->oom_adj;
	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters);
	p->pi_blocked_on = NULL;
#endif
}

#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
	mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */

/*
 * Initialize POSIX timer handling for a single task.
 */
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	tsk->cputime_expires.prof_exp = cputime_zero;
	tsk->cputime_expires.virt_exp = cputime_zero;
	tsk->cputime_expires.sched_exp = 0;
	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_lock(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * it's process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_unlock(current);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		task_lock(p);
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			atomic_dec(&p->mm->oom_disable_count);
		task_unlock(p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_fork_read_unlock(current);
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node);
		links[type].pid = &init_struct_pid;
	}
}
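
/*
 * Fork the idle task for a CPU: it reuses init_struct_pid and is set up
 * via init_idle() instead of being woken like an ordinary child.
 */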
struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
			    &init_struct_pid, 0);
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}

	return task;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't finished SIGSTOP raising yet.  Now we clear it
		 * and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event(trace, nr);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
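
/*
 * Slab constructor: runs when a sighand_struct object is first created,
 * not on every allocation (the cache is SLAB_DESTROY_BY_RCU, so freed
 * objects may be reused without the constructor running again).
 */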
static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPU's we can ever have. The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
	nsproxy_cache_init();
}

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing to
	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
	 * needs to unshare vm.
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		/* FIXME: get_task_mm() increments ->mm_users */
		if (atomic_read(&current->mm->mm_users) > 1)
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
	if (err)
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}