linux/arch/mn10300/kernel/process.c
/* MN10300  Process handling code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/reset-regs.h>
#include <asm/gdb-stub.h>
#include "internal.h"

/*
 * power off function, if any
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 *
 * tglx: No idea why this depends on HOTPLUG_CPU !?!
 */
#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
void arch_cpu_idle(void)
{
        safe_halt();
}
#endif

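/*
 * reboot the system: detach the GDB stub if it's compiled in, then ask the
 * unit (if it provides a hard reset) or the processor to perform a hard reset
 */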
void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
        gdbstub_exit(0);
#endif

#ifdef mn10300_unit_hard_reset
        mn10300_unit_hard_reset();
#else
        mn10300_proc_hard_reset();
#endif
}

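/*
 * halt the system: nothing board-specific to do here beyond detaching the
 * GDB stub
 */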
void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
        gdbstub_exit(0);
#endif
}

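/*
 * power off the system: as with halt, only the GDB stub is notified here
 */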
void machine_power_off(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
        gdbstub_exit(0);
#endif
}

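/*
 * dump the register state: only the generic task information banner is
 * printed here
 */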
void show_regs(struct pt_regs *regs)
{
        show_regs_print_info(KERN_DEFAULT);
}

/*
 * free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
        exit_fpu(tsk);
}

void flush_thread(void)
{
        flush_fpu();
}

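/*
 * release any arch-specific resources held by a dead task's thread: nothing
 * to do on this arch
 */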
void release_thread(struct task_struct *dead_task)
{
}

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        unlazy_fpu(src);
        *dst = *src;
        return 0;
}

/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 */
int copy_thread(unsigned long clone_flags,
                unsigned long c_usp, unsigned long ustk_size,
                struct task_struct *p)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *c_regs;
        unsigned long c_ksp;

        c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

        /* allocate the userspace exception frame and set it up */
        c_ksp -= sizeof(struct pt_regs);
        c_regs = (struct pt_regs *) c_ksp;
        c_ksp -= 12; /* allocate function call ABI slack */

        /* set things up so the scheduler can start the new task */
        p->thread.uregs = c_regs;
        ti->frame       = c_regs;
        p->thread.a3    = (unsigned long) c_regs;
        p->thread.sp    = c_ksp;
        p->thread.wchan = p->thread.pc;
        p->thread.usp   = c_usp;

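        /* kernel threads start from a cleared frame: A0 carries the thread
         * function and D0 its argument, and the thread begins life in
         * ret_from_kernel_thread
         */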
        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(c_regs, 0, sizeof(struct pt_regs));
                c_regs->a0 = c_usp; /* function */
                c_regs->d0 = ustk_size; /* argument */
                local_save_flags(c_regs->epsw);
                c_regs->epsw |= EPSW_IE | EPSW_IM_7;
                p->thread.pc    = (unsigned long) ret_from_kernel_thread;
                return 0;
        }
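        /* a userspace child starts from a copy of the parent's register
         * frame, with the new user stack pointer substituted if one was given
         */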
        *c_regs = *current_pt_regs();
        if (c_usp)
                c_regs->sp = c_usp;
        c_regs->epsw &= ~EPSW_FE; /* my FPU */

        /* the new TLS pointer is passed in as arg #5 to sys_clone() */
        if (clone_flags & CLONE_SETTLS)
                c_regs->e2 = current_frame()->d3;

        p->thread.pc    = (unsigned long) ret_from_fork;

        return 0;
}

/*
 * return the address recorded as this task's wait channel
 */
unsigned long get_wchan(struct task_struct *p)
{
        return p->thread.wchan;
}