/* linux/arch/mn10300/kernel/process.c */
/* MN10300  Process handling code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
  11#include <linux/module.h>
  12#include <linux/errno.h>
  13#include <linux/sched.h>
  14#include <linux/kernel.h>
  15#include <linux/mm.h>
  16#include <linux/smp.h>
  17#include <linux/stddef.h>
  18#include <linux/unistd.h>
  19#include <linux/ptrace.h>
  20#include <linux/user.h>
  21#include <linux/interrupt.h>
  22#include <linux/delay.h>
  23#include <linux/reboot.h>
  24#include <linux/percpu.h>
  25#include <linux/err.h>
  26#include <linux/fs.h>
  27#include <linux/slab.h>
  28#include <linux/rcupdate.h>
  29#include <asm/uaccess.h>
  30#include <asm/pgtable.h>
  31#include <asm/io.h>
  32#include <asm/processor.h>
  33#include <asm/mmu_context.h>
  34#include <asm/fpu.h>
  35#include <asm/reset-regs.h>
  36#include <asm/gdb-stub.h>
  37#include "internal.h"
  38
  39/*
  40 * return saved PC of a blocked thread.
  41 */
  42unsigned long thread_saved_pc(struct task_struct *tsk)
  43{
  44        return ((unsigned long *) tsk->thread.sp)[3];
  45}
  46
/*
 * Power-off hook for board/platform code to fill in; NULL when no such
 * facility exists.  Exported so modular drivers can install a handler.
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
  52
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 *
 * tglx: No idea why this depends on HOTPLUG_CPU !?!
 */
#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * Idle the CPU: enable interrupts and halt until the next interrupt
 * arrives (safe_halt() does both atomically).
 */
void arch_cpu_idle(void)
{
	safe_halt();
}
#endif
  66
/* Nothing to release on this architecture: no per-mm segment state. */
void release_segments(struct mm_struct *mm)
{
}
  70
/*
 * Restart the machine.  If a kernel debugger is attached, tell it we
 * are going down first; then use the unit-specific hard reset when the
 * unit provides one, falling back to the processor-level hard reset.
 * The @cmd argument is accepted for interface compatibility but unused.
 */
void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif

#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}
  83
/*
 * Halt the machine.  Only notifies an attached kernel debugger that we
 * are exiting; no actual hardware halt is performed here — the caller
 * simply never resumes useful work afterwards.
 */
void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}
  90
  91void machine_power_off(void)
  92{
  93#ifdef CONFIG_KERNEL_DEBUGGER
  94        gdbstub_exit(0);
  95#endif
  96}
  97
/*
 * Dump register state for debugging.  This port only emits the generic
 * task/stack banner via show_regs_print_info(); the CPU register
 * contents in @regs are not printed here.
 */
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
}
 102
/*
 * free current thread data structures etc..
 * On MN10300 the only per-thread arch state to tear down is the FPU
 * context, released via exit_fpu().
 */
void exit_thread(void)
{
	exit_fpu();
}
 110
/* Reset per-thread arch state for exec: discard the FPU context. */
void flush_thread(void)
{
	flush_fpu();
}
 115
/* No extra per-thread resources to free on this architecture. */
void release_thread(struct task_struct *dead_task)
{
}
 119
/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed — so this is deliberately a no-op.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}
 127
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 *
 * unlazy_fpu() must run BEFORE the struct assignment so that any FPU
 * state still held lazily for @src is flushed to memory and therefore
 * captured coherently by the copy.  Returns 0; cannot fail here.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	unlazy_fpu(src);
	*dst = *src;
	return 0;
}
 138
/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 *
 * @clone_flags: clone(2) flags (CLONE_SETTLS is honoured here)
 * @c_usp:       new user stack pointer; for a kernel thread this is
 *               instead the function to run
 * @ustk_size:   user stack size; for a kernel thread this is instead
 *               the argument passed to the function
 * @p:           the new task being set up
 *
 * Returns 0 (no failure paths on this architecture).
 */
int copy_thread(unsigned long clone_flags,
		unsigned long c_usp, unsigned long ustk_size,
		struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *c_regs;
	unsigned long c_ksp;

	/* start from the top of the new task's kernel stack */
	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
	c_regs = (struct pt_regs *) c_ksp;
	c_ksp -= 12; /* allocate function call ABI slack */

	/* set things up so the scheduler can start the new task */
	p->thread.uregs = c_regs;
	ti->frame       = c_regs;
	p->thread.a3    = (unsigned long) c_regs;
	p->thread.sp    = c_ksp;
	/* NOTE(review): wchan snapshots thread.pc as copied from the
	 * parent, since thread.pc is only reassigned further below —
	 * presumably intentional; confirm against get_wchan() users */
	p->thread.wchan = p->thread.pc;
	p->thread.usp   = c_usp;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread: build a synthetic frame — a0 carries the
		 * function, d0 its argument; run with interrupts enabled */
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */
		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc    = (unsigned long) ret_from_kernel_thread;
		return 0;
	}
	/* user thread: start from a copy of the parent's register frame */
	*c_regs = *current_pt_regs();
	if (c_usp)
		c_regs->sp = c_usp;
	c_regs->epsw &= ~EPSW_FE; /* my FPU */

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		c_regs->e2 = current_frame()->d3;

	p->thread.pc    = (unsigned long) ret_from_fork;

	return 0;
}
 188
 189unsigned long get_wchan(struct task_struct *p)
 190{
 191        return p->thread.wchan;
 192}
 193