linux/arch/mips/kernel/process.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        /* What the heck is this check doing ? */
        if (!cpu_isset(smp_processor_id(), cpu_callin_map))
                play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

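/*
 * Initialise the register state for a freshly exec'ed user program:
 * drop kernel and coprocessor privileges in the CP0 Status register,
 * discard any FPU/DSP state and point EPC and the user stack pointer
 * at the new image.
 */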
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
        unsigned long status;

        /* New thread loses kernel privileges. */
        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
#ifdef CONFIG_64BIT
        status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
        status |= KU_USER;
        regs->cp0_status = status;
        clear_used_math();
        clear_fpu_owner();
        if (cpu_has_dsp)
                __init_dsp();
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

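/*
 * Set up the kernel stack and saved register state for a new task.
 * Kernel threads get a frame that returns through ret_from_kernel_thread
 * and calls fn(arg) from reg16/reg17; user forks get a copy of the
 * parent's pt_regs with a zero return value and, if requested, a new
 * user stack pointer.
 */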
int copy_thread(unsigned long clone_flags, unsigned long usp,
        unsigned long arg, struct task_struct *p)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;
        p->set_child_tid = p->clear_child_tid = NULL;

        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

        preempt_disable();

        if (is_fpu_owner())
                save_fp(p);

        if (cpu_has_dsp)
                save_dsp(p);

        preempt_enable();

        /* set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
        /*  Put the stack after the struct pt_regs.  */
        childksp = (unsigned long) childregs;
        p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
        if (unlikely(p->flags & PF_KTHREAD)) {
                unsigned long status = p->thread.cp0_status;
                memset(childregs, 0, sizeof(struct pt_regs));
                ti->addr_limit = KERNEL_DS;
                p->thread.reg16 = usp; /* fn */
                p->thread.reg17 = arg;
                p->thread.reg29 = childksp;
                p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
                status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
                         ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
                status |= ST0_EXL;
#endif
                childregs->cp0_status = status;
                return 0;
        }
        *childregs = *regs;
        childregs->regs[7] = 0; /* Clear error flag */
        childregs->regs[2] = 0; /* Child gets zero as return value */
        if (usp)
                childregs->regs[29] = usp;
        ti->addr_limit = USER_DS;

        p->thread.reg29 = (unsigned long) childregs;
        p->thread.reg31 = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the fpu. This accelerates context
         * switching for most programs since they don't use the fpu.
         */
        childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC restores TCStatus after Status, and the CU bits
         * are aliased there.
         */
        childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
        clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
        clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

        if (clone_flags & CLONE_SETTLS)
                ti->tp_value = regs->regs[7];

        return 0;
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
        memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}

void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < EF_R0; i++)
                gp[i] = 0;
        gp[EF_R0] = 0;
        for (i = 1; i <= 31; i++)
                gp[EF_R0 + i] = regs->regs[i];
        gp[EF_R26] = 0;
        gp[EF_R27] = 0;
        gp[EF_LO] = regs->lo;
        gp[EF_HI] = regs->hi;
        gp[EF_CP0_EPC] = regs->cp0_epc;
        gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        gp[EF_CP0_STATUS] = regs->cp0_status;
        gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
        gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        elf_dump_regs(*regs, task_pt_regs(tsk));
        return 1;
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
        memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}

/*
 * Frame layout information used when scanning a function's prologue:
 * func and func_size bound the code to scan, frame_size is the stack
 * frame size found there and pc_offset is the offset (in longs) of the
 * saved $ra within that frame, or -1 if it was not found.
 */
struct mips_frame_info {
        void            *func;
        unsigned long   func_size;
        int             frame_size;
        int             pc_offset;
};

#define J_TARGET(pc,target)     \
                (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

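/*
 * Does this instruction save $ra to the stack, i.e. is it the
 * prologue's return-address store?
 */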
static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        union mips_instruction mmi;

        /*
         * swsp ra,offset
         * swm16 reglist,offset(sp)
         * swm32 reglist,offset(sp)
         * sw32 ra,offset(sp)
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is way more fun...
         */
        if (mm_insn_16bit(ip->halfword[0])) {
                mmi.word = (ip->halfword[0] << 16);
                return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
                         mmi.mm16_r5_format.rt == 31) ||
                        (mmi.mm16_m_format.opcode == mm_pool16c_op &&
                         mmi.mm16_m_format.func == mm_swm16_op));
        }
        else {
                mmi.halfword[0] = ip->halfword[1];
                mmi.halfword[1] = ip->halfword[0];
                return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
                         mmi.mm_m_format.rd > 9 &&
                         mmi.mm_m_format.base == 29 &&
                         mmi.mm_m_format.func == mm_swm32_func) ||
                        (mmi.i_format.opcode == mm_sw32_op &&
                         mmi.i_format.rs == 29 &&
                         mmi.i_format.rt == 31));
        }
#else
        /* sw / sd $ra, offset($sp) */
        return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 &&
                ip->i_format.rt == 31;
#endif
}

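/*
 * Is this a jump instruction (j/jal or a jr/jalr-style register jump)?
 * The prologue scan in get_frame_info() stops at the first one.
 */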
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * jr16,jrc,jalr16,jalrs16
         * jal
         * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is kind of more fun...
         */
        union mips_instruction mmi;

        mmi.word = (ip->halfword[0] << 16);

        if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
            (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
            ip->j_format.opcode == mm_jal32_op)
                return 1;
        if (ip->r_format.opcode != mm_pool32a_op ||
                        ip->r_format.func != mm_pool32axf_op)
                return 0;
        return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
#else
        if (ip->j_format.opcode == j_op)
                return 1;
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

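/*
 * Does this instruction adjust the stack pointer (addiu/daddiu sp,sp,imm
 * or a microMIPS equivalent), i.e. the prologue's frame allocation?
 */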
static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * addiusp -imm
         * addius5 sp,-imm
         * addiu32 sp,sp,-imm
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is not more fun...
         */
        if (mm_insn_16bit(ip->halfword[0])) {
                union mips_instruction mmi;

                mmi.word = (ip->halfword[0] << 16);
                return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
                         mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
                        (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
                         mmi.mm16_r5_format.rt == 29));
        }
        return (ip->mm_i_format.opcode == mm_addiu32_op &&
                 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
#else
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;
        if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
                return 1;
#endif
        return 0;
}

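/*
 * Scan the prologue of info->func (at most 128 instructions) for the
 * stack frame allocation and the store that saves $ra.  Returns 0 when
 * both frame_size and pc_offset have been determined, 1 when the
 * function looks like a leaf (no $ra save found) and -1 when the
 * prologue cannot be analyzed.
 */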
static int get_frame_info(struct mips_frame_info *info)
{
#ifdef CONFIG_CPU_MICROMIPS
        union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
        union mips_instruction *ip = info->func;
#endif
        unsigned max_insns = info->func_size / sizeof(union mips_instruction);
        unsigned i;

        info->pc_offset = -1;
        info->frame_size = 0;

        if (!ip)
                goto err;

        if (max_insns == 0)
                max_insns = 128U;       /* unknown function size */
        max_insns = min(128U, max_insns);

        for (i = 0; i < max_insns; i++, ip++) {

                if (is_jump_ins(ip))
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
                        {
#ifdef CONFIG_CPU_MICROMIPS
                                if (mm_insn_16bit(ip->halfword[0]))
                                {
                                        unsigned short tmp;

                                        if (ip->halfword[0] & mm_addiusp_func)
                                        {
                                                tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
                                                info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
                                        } else {
                                                tmp = (ip->halfword[0] >> 1);
                                                info->frame_size = -(signed short)(tmp & 0xf);
                                        }
                                        ip = (void *) &ip->halfword[1];
                                        ip--;
                                } else
#endif
                                info->frame_size = - ip->i_format.simmediate;
                        }
                        continue;
                }
                if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
                        info->pc_offset =
                                ip->i_format.simmediate / sizeof(long);
                        break;
                }
        }
        if (info->frame_size && info->pc_offset >= 0) /* nested */
                return 0;
        if (info->pc_offset < 0) /* leaf */
                return 1;
        /* prologue seems bogus... */
err:
        return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
        return kallsyms_lookup_name("__schedule");
}
#else
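/*
 * Without kallsyms, locate __schedule() by scanning the first few
 * instructions of schedule() for the "j __schedule" tail call and
 * decoding its target; returns 0 if no such jump is found.
 */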
static unsigned long get___schedule_addr(void)
{
        union mips_instruction *ip = (void *)schedule;
        int max_insns = 8;
        int i;

        for (i = 0; i < max_insns; i++, ip++) {
                if (ip->j_format.opcode == j_op)
                        return J_TARGET(ip, ip->j_format.target);
        }
        return 0;
}
#endif

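/*
 * Analyze the prologue of __schedule()/schedule() once at boot so that
 * thread_saved_pc() and get_wchan() can later locate the saved $ra and
 * frame size of a sleeping task.
 */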
static int __init frame_info_init(void)
{
        unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long ofs;
#endif
        unsigned long addr;

        addr = get___schedule_addr();
        if (!addr)
                addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
        kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
        schedule_mfi.func = (void *)addr;
        schedule_mfi.func_size = size;

        get_frame_info(&schedule_mfi);

        /*
         * Without schedule() frame info, the results given by
         * thread_saved_pc() and get_wchan() are not reliable.
         */
        if (schedule_mfi.pc_offset < 0)
                printk("Can't analyze schedule() prologue at %p\n", schedule);

        return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;

        /* Newborn processes are a special case */
        if (t->reg31 == (unsigned long) ret_from_fork)
                return t->reg31;
        if (schedule_mfi.pc_offset < 0)
                return 0;
        return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/*
 * Generic stack unwinding function: step back one stack frame within
 * stack_page.  Returns the previous PC in the call chain (or the
 * interrupted PC when crossing an exception frame) and updates *sp and
 * *ra; returns 0 when unwinding is not possible.
 */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                                              unsigned long *sp,
                                              unsigned long pc,
                                              unsigned long *ra)
{
        struct mips_frame_info info;
        unsigned long size, ofs;
        int leaf;
        extern void ret_from_irq(void);
        extern void ret_from_exception(void);

        if (!stack_page)
                return 0;

        /*
         * If we reached the bottom of interrupt context,
         * return saved pc in pt_regs.
         */
        if (pc == (unsigned long)ret_from_irq ||
            pc == (unsigned long)ret_from_exception) {
                struct pt_regs *regs;
                if (*sp >= stack_page &&
                    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
                        regs = (struct pt_regs *)*sp;
                        pc = regs->cp0_epc;
                        if (__kernel_text_address(pc)) {
                                *sp = regs->regs[29];
                                *ra = regs->regs[31];
                                return pc;
                        }
                }
                return 0;
        }
        if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
                return 0;
        /*
         * Return ra if an exception occurred at the first instruction
         */
        if (unlikely(ofs == 0)) {
                pc = *ra;
                *ra = 0;
                return pc;
        }

        info.func = (void *)(pc - ofs);
        info.func_size = ofs;   /* analyze from start to ofs */
        leaf = get_frame_info(&info);
        if (leaf < 0)
                return 0;

        if (*sp < stack_page ||
            *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
                return 0;

        if (leaf)
                /*
                 * In some extreme cases, get_frame_info() can wrongly
                 * consider a nested function to be a leaf one.  In those
                 * cases, avoid always returning the same value.
                 */
                pc = pc != *ra ? *ra : 0;
        else
                pc = ((unsigned long *)(*sp))[info.pc_offset];

        *sp += info.frame_size;
        *ra = 0;
        return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
                           unsigned long pc, unsigned long *ra)
{
        unsigned long stack_page = (unsigned long)task_stack_page(task);
        return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long sp;
        unsigned long ra = 0;
#endif

        if (!task || task == current || task->state == TASK_RUNNING)
                goto out;
        if (!task_stack_page(task))
                goto out;

        pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
        sp = task->thread.reg29 + schedule_mfi.frame_size;

        while (in_sched_functions(pc))
                pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
        return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;

        return sp & ALMASK;
}