linux/arch/um/kernel/process.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}
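
/*
 * Background, as a rough summary of the skas design rather than
 * anything this file guarantees: each virtual CPU's userspace runs in
 * its own host process, and userspace_pid[] (declared in skas.h)
 * records those host PIDs. Only index 0 is used until the FIXME above
 * is resolved.
 */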

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}
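
/*
 * Example: with 4 KiB pages, alloc_stack(1, 0) returns 2^1 = 2
 * contiguous pages (an 8 KiB stack) allocated with GFP_KERNEL;
 * passing atomic != 0 switches to GFP_ATOMIC so the allocation is
 * safe in contexts that cannot sleep.
 */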

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}
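
/*
 * A sketch of the host side of the switch, assuming the os-Linux
 * implementation of switch_threads():
 *
 *	void switch_threads(jmp_buf *me, jmp_buf *you)
 *	{
 *		if (UML_SETJMP(me) == 0)
 *			UML_LONGJMP(you, 1);
 *	}
 *
 * The switch_threads() call in __switch_to() thus "returns" only when
 * some other thread later longjmps back into this thread's switch_buf.
 */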

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal();
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(&current->thread.regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}
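
/*
 * A sketch of how that stuffing happens, assuming the os-Linux
 * new_thread() (used by copy_thread() below) still looks like this:
 *
 *	void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
 *	{
 *		(*buf)[0].JB_IP = (unsigned long) handler;
 *		(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE
 *				  - sizeof(void *);
 *	}
 *
 * The first switch_threads() into that jmp_buf then lands in
 * new_thread_handler() or fork_handler() on the fresh kernel stack.
 */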

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We might want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}
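
/*
 * Note the calling convention above: for a kernel thread (PF_KTHREAD)
 * the generic fork code passes the thread function in 'sp' and its
 * argument in 'arg', which copy_thread() parks in thread.request for
 * new_thread_handler(). A userspace child instead inherits the
 * parent's registers, with the syscall return value forced to 0 so
 * that fork() reads as 0 in the child.
 */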

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	unsigned long long nsecs;

	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	nsecs = disable_timer();
	idle_sleep(nsecs);
	local_irq_enable();
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}
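
/*
 * Worked example, assuming 4 KiB pages (PAGE_MASK == ~0xfffUL) and
 * CONFIG_KERNEL_STACK_ORDER == 1: the mask becomes ~0x1fffUL, so
 * 'stack' is 'sp' rounded down to the base of an 8 KiB-aligned kernel
 * stack. struct thread_info lives at that base, so an sp inside the
 * current kernel stack masks to current_thread_info(); anything else
 * is judged to be a userspace address.
 */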

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
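
/*
 * Walking down from __uml_exitcall_end runs the exitcalls in reverse
 * registration order, the same last-in, first-out convention as
 * atexit().
 */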

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);
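
/*
 * Usage sketch; the value meanings below follow the usual UML sysemu
 * convention and are not spelled out in this file:
 *
 *	# cat /proc/sysemu
 *	0
 *	# echo 1 > /proc/sysemu
 *
 * 0 selects plain PTRACE_SYSCALL tracing, 1 selects PTRACE_SYSEMU, and
 * 2 selects PTRACE_SYSEMU_SINGLESTEP, capped by sysemu_supported.
 */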

/*
 * Returns 0 if 't' (or current, if 't' is NULL) is not being
 * singlestepped, 1 if it is singlestepping across a syscall, and 2
 * for ordinary singlestepping.
 */
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
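
/*
 * Example: with randomization in effect, an initial sp of 0x7fff0000
 * lands somewhere in [0x7ffee000, 0x7fff0000]: get_random_int() % 8192
 * pulls it down by up to 8 KiB - 1, and the final '& ~0xf' rounds the
 * result down to a 16-byte boundary.
 */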

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}
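
/*
 * Note that this is a conservative word-by-word scan of the raw stack
 * rather than a frame-pointer unwind: the first kernel-text address
 * found above the scheduler frames is reported, so a stale stack slot
 * can occasionally yield a bogus wchan.
 */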

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}