linux/arch/mips/kernel/process.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        /* What the heck is this check doing ? */
        if (!cpu_isset(smp_processor_id(), cpu_callin_map))
                play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
        unsigned long status;

        /* New thread loses kernel privileges. */
        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
#ifdef CONFIG_64BIT
        status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
        status |= KU_USER;
        regs->cp0_status = status;
        clear_used_math();
        clear_fpu_owner();
        if (cpu_has_dsp)
                __init_dsp();
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
}
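
/*
 * Illustrative note (not part of the original source): clearing ST0_CU1
 * above leaves the new thread with coprocessor 1 (the FPU) disabled, so
 * its first FP instruction traps with a coprocessor-unusable exception
 * and FPU state is only then handed over or emulated.  On CONFIG_64BIT,
 * setting ST0_FR (unless TIF_32BIT_REGS is set) selects the 64-bit FP
 * register model for the new program.
 */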

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(unsigned long clone_flags, unsigned long usp,
        unsigned long arg, struct task_struct *p)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;
        p->set_child_tid = p->clear_child_tid = NULL;

        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

        preempt_disable();

        if (is_fpu_owner())
                save_fp(p);

        if (cpu_has_dsp)
                save_dsp(p);

        preempt_enable();

        /* set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
        /*  Put the stack after the struct pt_regs.  */
        childksp = (unsigned long) childregs;
        p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
        if (unlikely(p->flags & PF_KTHREAD)) {
                unsigned long status = p->thread.cp0_status;
                memset(childregs, 0, sizeof(struct pt_regs));
                ti->addr_limit = KERNEL_DS;
                p->thread.reg16 = usp; /* fn */
                p->thread.reg17 = arg;
                p->thread.reg29 = childksp;
                p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
                status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
                         ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
                status |= ST0_EXL;
#endif
                childregs->cp0_status = status;
                return 0;
        }
        *childregs = *regs;
        childregs->regs[7] = 0; /* Clear error flag */
        childregs->regs[2] = 0; /* Child gets zero as return value */
        if (usp)
                childregs->regs[29] = usp;
        ti->addr_limit = USER_DS;

        p->thread.reg29 = (unsigned long) childregs;
        p->thread.reg31 = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the fpu. This accelerates context
         * switching for most programs since they don't use the fpu.
         */
        childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC restores TCStatus after Status, and the CU bits
         * are aliased there.
         */
        childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
        clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
        clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

        if (clone_flags & CLONE_SETTLS)
                ti->tp_value = regs->regs[7];

        return 0;
}
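
/*
 * Illustrative note (not part of the original source): in the MIPS
 * syscall convention $v0 (regs[2]) carries the return value and $a3
 * (regs[7]) the error flag, so zeroing both above is what makes fork()
 * appear to return 0, with no error, in the child once it resumes at
 * ret_from_fork.  For kernel threads, reg16/reg17 instead smuggle the
 * thread function and its argument to ret_from_kernel_thread.
 */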

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
        memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}

void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < EF_R0; i++)
                gp[i] = 0;
        gp[EF_R0] = 0;
        for (i = 1; i <= 31; i++)
                gp[EF_R0 + i] = regs->regs[i];
        gp[EF_R26] = 0;
        gp[EF_R27] = 0;
        gp[EF_LO] = regs->lo;
        gp[EF_HI] = regs->hi;
        gp[EF_CP0_EPC] = regs->cp0_epc;
        gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        gp[EF_CP0_STATUS] = regs->cp0_status;
        gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
        gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        elf_dump_regs(*regs, task_pt_regs(tsk));
        return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
        memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
        void            *func;
        unsigned long   func_size;
        int             frame_size;
        int             pc_offset;
};

#define J_TARGET(pc,target)     \
                (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
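
/*
 * Illustrative example (values are hypothetical, not from the original
 * source): a j/jal instruction at pc 0x80101234 whose 26-bit target
 * field is 0x040000 resolves, via J_TARGET, to
 * (0x80101234 & 0xf0000000) | (0x040000 << 2) = 0x80100000,
 * i.e. the target keeps the top four address bits of the current
 * 256 MB segment.
 */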

static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        union mips_instruction mmi;

        /*
         * swsp ra,offset
         * swm16 reglist,offset(sp)
         * swm32 reglist,offset(sp)
         * sw32 ra,offset(sp)
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is way more fun...
         */
        if (mm_insn_16bit(ip->halfword[0])) {
                mmi.word = (ip->halfword[0] << 16);
                return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
                         mmi.mm16_r5_format.rt == 31) ||
                        (mmi.mm16_m_format.opcode == mm_pool16c_op &&
                         mmi.mm16_m_format.func == mm_swm16_op));
        }
        else {
                mmi.halfword[0] = ip->halfword[1];
                mmi.halfword[1] = ip->halfword[0];
                return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
                         mmi.mm_m_format.rd > 9 &&
                         mmi.mm_m_format.base == 29 &&
                         mmi.mm_m_format.func == mm_swm32_func) ||
                        (mmi.i_format.opcode == mm_sw32_op &&
                         mmi.i_format.rs == 29 &&
                         mmi.i_format.rt == 31));
        }
#else
        /* sw / sd $ra, offset($sp) */
        return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 &&
                ip->i_format.rt == 31;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * jr16,jrc,jalr16,jalr16
         * jal
         * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is kind of more fun...
         */
        union mips_instruction mmi;

        mmi.word = (ip->halfword[0] << 16);

        if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
            (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
            ip->j_format.opcode == mm_jal32_op)
                return 1;
        if (ip->r_format.opcode != mm_pool32a_op ||
                        ip->r_format.func != mm_pool32axf_op)
                return 0;
        return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
#else
        if (ip->j_format.opcode == j_op)
                return 1;
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * addiusp -imm
         * addius5 sp,-imm
         * addiu32 sp,sp,-imm
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is not more fun...
         */
        if (mm_insn_16bit(ip->halfword[0])) {
                union mips_instruction mmi;

                mmi.word = (ip->halfword[0] << 16);
                return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
                         mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
                        (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
                         mmi.mm16_r5_format.rt == 29));
        }
        return (ip->mm_i_format.opcode == mm_addiu32_op &&
                 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
#else
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;
        if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
                return 1;
#endif
        return 0;
}

static int get_frame_info(struct mips_frame_info *info)
{
#ifdef CONFIG_CPU_MICROMIPS
        union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
        union mips_instruction *ip = info->func;
#endif
        unsigned max_insns = info->func_size / sizeof(union mips_instruction);
        unsigned i;

        info->pc_offset = -1;
        info->frame_size = 0;

        if (!ip)
                goto err;

        if (max_insns == 0)
                max_insns = 128U;       /* unknown function size */
        max_insns = min(128U, max_insns);

        for (i = 0; i < max_insns; i++, ip++) {

                if (is_jump_ins(ip))
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
                        {
#ifdef CONFIG_CPU_MICROMIPS
                                if (mm_insn_16bit(ip->halfword[0]))
                                {
                                        unsigned short tmp;

                                        if (ip->halfword[0] & mm_addiusp_func)
                                        {
                                                tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
                                                info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
                                        } else {
                                                tmp = (ip->halfword[0] >> 1);
                                                info->frame_size = -(signed short)(tmp & 0xf);
                                        }
                                        ip = (void *) &ip->halfword[1];
                                        ip--;
                                } else
#endif
                                info->frame_size = - ip->i_format.simmediate;
                        }
                        continue;
                }
                if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
                        info->pc_offset =
                                ip->i_format.simmediate / sizeof(long);
                        break;
                }
        }
        if (info->frame_size && info->pc_offset >= 0) /* nested */
                return 0;
        if (info->pc_offset < 0) /* leaf */
                return 1;
        /* prologue seems bogus... */
err:
        return -1;
}
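
/*
 * Illustrative example (not part of the original source): for a classic
 * non-leaf prologue such as
 *
 *      addiu   sp, sp, -32             (matched by is_sp_move_ins())
 *      sw      ra, 28(sp)              (matched by is_ra_save_ins())
 *
 * get_frame_info() records frame_size = 32 and, on a 32-bit kernel,
 * pc_offset = 28 / sizeof(long) = 7, then returns 0 ("nested").  The
 * unwinder can later reload the caller's pc from
 * ((unsigned long *)sp)[pc_offset] and advance sp by frame_size.
 */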

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
        return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
        union mips_instruction *ip = (void *)schedule;
        int max_insns = 8;
        int i;

        for (i = 0; i < max_insns; i++, ip++) {
                if (ip->j_format.opcode == j_op)
                        return J_TARGET(ip, ip->j_format.target);
        }
        return 0;
}
#endif

static int __init frame_info_init(void)
{
        unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long ofs;
#endif
        unsigned long addr;

        addr = get___schedule_addr();
        if (!addr)
                addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
        kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
        schedule_mfi.func = (void *)addr;
        schedule_mfi.func_size = size;

        get_frame_info(&schedule_mfi);

        /*
         * Without schedule() frame info, the results given by
         * thread_saved_pc() and get_wchan() are not reliable.
         */
        if (schedule_mfi.pc_offset < 0)
                printk("Can't analyze schedule() prologue at %p\n", schedule);

        return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;

        /* Newborn processes are a special case */
        if (t->reg31 == (unsigned long) ret_from_fork)
                return t->reg31;
        if (schedule_mfi.pc_offset < 0)
                return 0;
        return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
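
/*
 * Illustrative note (not part of the original source): for a blocked
 * task, t->reg29 is the kernel stack pointer saved across the context
 * switch inside __schedule(), so indexing it with schedule_mfi.pc_offset
 * (precomputed by frame_info_init() above) reads back the return address
 * that __schedule()'s prologue stored in its own stack frame.
 */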


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                                              unsigned long *sp,
                                              unsigned long pc,
                                              unsigned long *ra)
{
        struct mips_frame_info info;
        unsigned long size, ofs;
        int leaf;
        extern void ret_from_irq(void);
        extern void ret_from_exception(void);

        if (!stack_page)
                return 0;

        /*
         * If we reached the bottom of interrupt context,
         * return saved pc in pt_regs.
         */
        if (pc == (unsigned long)ret_from_irq ||
            pc == (unsigned long)ret_from_exception) {
                struct pt_regs *regs;
                if (*sp >= stack_page &&
                    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
                        regs = (struct pt_regs *)*sp;
                        pc = regs->cp0_epc;
                        if (__kernel_text_address(pc)) {
                                *sp = regs->regs[29];
                                *ra = regs->regs[31];
                                return pc;
                        }
                }
                return 0;
        }
        if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
                return 0;
        /*
         * Return ra if an exception occurred at the first instruction
         */
        if (unlikely(ofs == 0)) {
                pc = *ra;
                *ra = 0;
                return pc;
        }

        info.func = (void *)(pc - ofs);
        info.func_size = ofs;   /* analyze from start to ofs */
        leaf = get_frame_info(&info);
        if (leaf < 0)
                return 0;

        if (*sp < stack_page ||
            *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
                return 0;

        if (leaf)
                /*
                 * In some extreme cases, get_frame_info() can wrongly
                 * consider a nested function to be a leaf one.  In such
                 * cases, avoid always returning the same value.
                 */
                pc = pc != *ra ? *ra : 0;
        else
                pc = ((unsigned long *)(*sp))[info.pc_offset];

        *sp += info.frame_size;
        *ra = 0;
        return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
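
/*
 * Hedged usage sketch (not part of the original source): a backtrace
 * printer typically drives the unwinder in a loop along these lines:
 *
 *      do {
 *              print_ip_sym(pc);
 *              pc = unwind_stack(task, &sp, pc, &ra);
 *      } while (pc);
 *
 * which is roughly what show_backtrace() in arch/mips/kernel/traps.c does.
 */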

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
                           unsigned long pc, unsigned long *ra)
{
        unsigned long stack_page = (unsigned long)task_stack_page(task);
        return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long sp;
        unsigned long ra = 0;
#endif

        if (!task || task == current || task->state == TASK_RUNNING)
                goto out;
        if (!task_stack_page(task))
                goto out;

        pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
        sp = task->thread.reg29 + schedule_mfi.frame_size;

        while (in_sched_functions(pc))
                pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
        return pc;
}
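
/*
 * Illustrative note (not part of the original source): get_wchan() is
 * what backs /proc/<pid>/wchan.  Starting from the pc saved across the
 * context switch, it keeps unwinding while the pc still lies inside
 * scheduler code, so the reported "wait channel" is the first
 * non-scheduler function the blocked task is sleeping in.
 */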

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the
 * 64-bit ABI.
 */
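/*
 * Illustrative example (values are hypothetical, not from the original
 * source): with 4 KB pages, "get_random_int() & ~PAGE_MASK" yields an
 * offset in [0, 4095]; starting from sp = 0x7fff1000 and a random offset
 * of 0x123, sp becomes 0x7fff0edd and the final "& ALMASK" (~7 on 32-bit,
 * ~15 on 64-bit kernels) rounds it down to 0x7fff0ed8.
 */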
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;

        return sp & ALMASK;
}