linux/arch/mips/kernel/process.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>
#include <linux/cpu.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/mips-cps.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>

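/*
 * With CPU hotplug, the generic idle loop calls arch_cpu_idle_dead() on a
 * CPU that has been marked offline; play_dead() parks the CPU until it is
 * brought back online.
 */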
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

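/*
 * Called when starting a new user program (e.g. from the ELF loader):
 * drop kernel privileges and coprocessor access, discard any lazily-held
 * FPU/MSA context, reinitialise the DSP state, then point the exception
 * return at the new entry point and user stack.
 */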
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
        unsigned long status;

        /* New thread loses kernel privileges. */
        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
        status |= KU_USER;
        regs->cp0_status = status;
        lose_fpu(0);
        clear_thread_flag(TIF_MSA_CTX_LIVE);
        clear_used_math();
        atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
        init_dsp();
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
        /*
         * User threads may have allocated a delay slot emulation frame.
         * If so, clean up that allocation.
         */
        if (!(current->flags & PF_KTHREAD))
                dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        /*
         * Save any process state which is live in hardware registers to the
         * parent context prior to duplication. This prevents the new child
         * state becoming stale if the parent is preempted before copy_thread()
         * gets a chance to save the parent's live hardware registers to the
         * child context.
         */
        preempt_disable();

        if (is_msa_enabled())
                save_msa(current);
        else if (is_fpu_owner())
                _save_fp(current);

        save_dsp(current);

        preempt_enable();

        *dst = *src;
        return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
        unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;

        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

        /* Set up the child's kernel-mode register frame (struct pt_regs). */
        childregs = (struct pt_regs *) childksp - 1;
        /* Put the kernel stack pointer just below the struct pt_regs. */
        childksp = (unsigned long) childregs;
        p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                unsigned long status = p->thread.cp0_status;
                memset(childregs, 0, sizeof(struct pt_regs));
                ti->addr_limit = KERNEL_DS;
                p->thread.reg16 = usp; /* fn */
                p->thread.reg17 = kthread_arg;
                p->thread.reg29 = childksp;
                p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
                status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
                         ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
                status |= ST0_EXL;
#endif
                childregs->cp0_status = status;
                return 0;
        }

        /* user thread */
        *childregs = *regs;
        childregs->regs[7] = 0; /* Clear error flag */
        childregs->regs[2] = 0; /* Child gets zero as return value */
        if (usp)
                childregs->regs[29] = usp;
        ti->addr_limit = USER_DS;

        p->thread.reg29 = (unsigned long) childregs;
        p->thread.reg31 = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the FPU. This accelerates context
         * switching for most programs since they don't use the FPU. The first
         * FP instruction traps with a coprocessor unusable exception, which
         * re-enables the FPU lazily.
         */
        childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

        clear_tsk_thread_flag(p, TIF_USEDFPU);
        clear_tsk_thread_flag(p, TIF_USEDMSA);
        clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
        clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

        atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);

        if (clone_flags & CLONE_SETTLS)
                ti->tp_value = tls;

        return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

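/*
 * Prologue analysis state for one function: @func_size is in bytes,
 * @frame_size is the stack frame size in bytes, and @pc_offset is the
 * offset of the saved $ra slot within the frame in sizeof(long) units
 * (-1 while unknown).
 */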
struct mips_frame_info {
        void            *func;
        unsigned long   func_size;
        int             frame_size;
        int             pc_offset;
};

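/*
 * Compute the destination of a MIPS J-type instruction: the 26-bit
 * target field, shifted left by 2, replaces the low 28 bits of the
 * address of the jump. E.g. with pc 0x80123458 and target 0x048d15,
 * J_TARGET gives 0x80000000 | 0x123454 == 0x80123454.
 */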
#define J_TARGET(pc,target)     \
                (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

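/*
 * Return 1 if @ip stores $ra into the stack frame, writing the offset
 * of the save slot (in sizeof(long) units) to *poff; return 0 otherwise.
 */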
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * swsp ra,offset
         * swm16 reglist,offset(sp)
         * swm32 reglist,offset(sp)
         * sw32 ra,offset(sp)
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is way more fun...
         */
        if (mm_insn_16bit(ip->word >> 16)) {
                switch (ip->mm16_r5_format.opcode) {
                case mm_swsp16_op:
                        if (ip->mm16_r5_format.rt != 31)
                                return 0;

                        *poff = ip->mm16_r5_format.imm;
                        *poff = (*poff << 2) / sizeof(ulong);
                        return 1;

                case mm_pool16c_op:
                        switch (ip->mm16_m_format.func) {
                        case mm_swm16_op:
                                *poff = ip->mm16_m_format.imm;
                                *poff += 1 + ip->mm16_m_format.rlist;
                                *poff = (*poff << 2) / sizeof(ulong);
                                return 1;

                        default:
                                return 0;
                        }

                default:
                        return 0;
                }
        }

        switch (ip->i_format.opcode) {
        case mm_sw32_op:
                if (ip->i_format.rs != 29)
                        return 0;
                if (ip->i_format.rt != 31)
                        return 0;

                *poff = ip->i_format.simmediate / sizeof(ulong);
                return 1;

        case mm_pool32b_op:
                switch (ip->mm_m_format.func) {
                case mm_swm32_func:
                        if (ip->mm_m_format.rd < 0x10)
                                return 0;
                        if (ip->mm_m_format.base != 29)
                                return 0;

                        *poff = ip->mm_m_format.simmediate;
                        *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
                        *poff /= sizeof(ulong);
                        return 1;
                default:
                        return 0;
                }

        default:
                return 0;
        }
#else
        /* sw / sd $ra, offset($sp) */
        if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 && ip->i_format.rt == 31) {
                *poff = ip->i_format.simmediate / sizeof(ulong);
                return 1;
        }

        return 0;
#endif
}

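/*
 * Return 1 if @ip is any jump instruction (j/jal/jr/jalr or a microMIPS
 * equivalent). The prologue scanner uses this to spot the end of the
 * frame setup.
 */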
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * jr16,jrc,jalr16,jalrs16
         * jal
         * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is kind of more fun...
         */
        if (mm_insn_16bit(ip->word >> 16)) {
                if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
                    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
                        return 1;
                return 0;
        }

        if (ip->j_format.opcode == mm_j32_op)
                return 1;
        if (ip->j_format.opcode == mm_jal32_op)
                return 1;
        if (ip->r_format.opcode != mm_pool32a_op ||
                        ip->r_format.func != mm_pool32axf_op)
                return 0;
        return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
        if (ip->j_format.opcode == j_op)
                return 1;
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

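/*
 * Return 1 if @ip allocates a stack frame ("addiu/daddiu sp,sp,-imm" or
 * a microMIPS equivalent), writing the frame size in bytes to
 * *frame_size; return 0 otherwise.
 */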
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
        unsigned short tmp;

        /*
         * addiusp -imm
         * addius5 sp,-imm
         * addiu32 sp,sp,-imm
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is not more fun...
         */
        if (mm_insn_16bit(ip->word >> 16)) {
                if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
                    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
                        tmp = ip->mm_b0_format.simmediate >> 1;
                        tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
                        if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
                                tmp ^= 0x100;
                        *frame_size = -(signed short)(tmp << 2);
                        return 1;
                }
                if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
                    ip->mm16_r5_format.rt == 29) {
                        tmp = ip->mm16_r5_format.imm >> 1;
                        *frame_size = -(signed short)(tmp & 0xf);
                        return 1;
                }
                return 0;
        }

        if (ip->mm_i_format.opcode == mm_addiu32_op &&
            ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
                *frame_size = -ip->i_format.simmediate;
                return 1;
        }
#else
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;

        if (ip->i_format.opcode == addiu_op ||
            ip->i_format.opcode == daddiu_op) {
                *frame_size = -ip->i_format.simmediate;
                return 1;
        }
#endif
        return 0;
}

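/*
 * Scan up to the first 128 instructions of @info->func for the frame
 * allocation and the $ra save. Returns 0 for a nested (non-leaf)
 * function, 1 for a leaf function, and -1 if the prologue could not be
 * analyzed.
 */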
static int get_frame_info(struct mips_frame_info *info)
{
        bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
        union mips_instruction insn, *ip;
        const unsigned int max_insns = 128;
        unsigned int last_insn_size = 0;
        unsigned int i;
        bool saw_jump = false;

        info->pc_offset = -1;
        info->frame_size = 0;

        ip = (void *)msk_isa16_mode((ulong)info->func);
        if (!ip)
                goto err;

        for (i = 0; i < max_insns; i++) {
                ip = (void *)ip + last_insn_size;

                if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
                        insn.word = ip->halfword[0] << 16;
                        last_insn_size = 2;
                } else if (is_mmips) {
                        insn.word = ip->halfword[0] << 16 | ip->halfword[1];
                        last_insn_size = 4;
                } else {
                        insn.word = ip->word;
                        last_insn_size = 4;
                }

                if (!info->frame_size) {
                        is_sp_move_ins(&insn, &info->frame_size);
                        continue;
                } else if (!saw_jump && is_jump_ins(ip)) {
                        /*
                         * If we see a jump instruction, we are finished
                         * with the frame save.
                         *
                         * Some functions can have a shortcut return at
                         * the beginning of the function, so don't start
                         * looking for jump instructions until we see the
                         * frame setup.
                         *
                         * The RA save instruction can get put into the
                         * delay slot of the jump instruction, so look
                         * at the next instruction, too.
                         */
                        saw_jump = true;
                        continue;
                }
                if (info->pc_offset == -1 &&
                    is_ra_save_ins(&insn, &info->pc_offset))
                        break;
                if (saw_jump)
                        break;
        }
        if (info->frame_size && info->pc_offset >= 0) /* nested */
                return 0;
        if (info->pc_offset < 0) /* leaf */
                return 1;
        /* prologue seems bogus... */
err:
        return -1;
}

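/* Frame info for __schedule(), used by thread_saved_pc() and get_wchan(). */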
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
        return kallsyms_lookup_name("__schedule");
}
#else
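/*
 * Without kallsyms, locate __schedule() by scanning the start of
 * schedule() for the tail-call jump the compiler usually emits; fall
 * back to schedule() itself if none is found (see frame_info_init()).
 */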
static unsigned long get___schedule_addr(void)
{
        union mips_instruction *ip = (void *)schedule;
        int max_insns = 8;
        int i;

        for (i = 0; i < max_insns; i++, ip++) {
                if (ip->j_format.opcode == j_op)
                        return J_TARGET(ip, ip->j_format.target);
        }
        return 0;
}
#endif

static int __init frame_info_init(void)
{
        unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long ofs;
#endif
        unsigned long addr;

        addr = get___schedule_addr();
        if (!addr)
                addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
        kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
        schedule_mfi.func = (void *)addr;
        schedule_mfi.func_size = size;

        get_frame_info(&schedule_mfi);

        /*
         * Without schedule() frame info, the results given by
         * thread_saved_pc() and get_wchan() are not reliable.
         */
        if (schedule_mfi.pc_offset < 0)
                printk("Can't analyze schedule() prologue at %p\n", schedule);

        return 0;
}

arch_initcall(frame_info_init);

/*
 * Return the saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;

        /* Newborn processes are a special case. */
        if (t->reg31 == (unsigned long) ret_from_fork)
                return t->reg31;
        if (schedule_mfi.pc_offset < 0)
                return 0;
        return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/*
 * Generic stack unwinding function: given a stack page base, the
 * current pc and in/out sp/ra values, unwind one frame and return the
 * caller's pc, or 0 when no further unwinding is possible.
 */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                                              unsigned long *sp,
                                              unsigned long pc,
                                              unsigned long *ra)
{
        unsigned long low, high, irq_stack_high;
        struct mips_frame_info info;
        unsigned long size, ofs;
        struct pt_regs *regs;
        int leaf;

        if (!stack_page)
                return 0;

        /*
         * IRQ stacks are usable up to IRQ_STACK_START,
         * task stacks up to THREAD_SIZE - 32.
         */
        low = stack_page;
        if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
                high = stack_page + IRQ_STACK_START;
                irq_stack_high = high;
        } else {
                high = stack_page + THREAD_SIZE - 32;
                irq_stack_high = 0;
        }

        /*
         * If we reached the top of the interrupt stack, start unwinding
         * the interrupted task stack.
         */
        if (unlikely(*sp == irq_stack_high)) {
                unsigned long task_sp = *(unsigned long *)*sp;

                /*
                 * Check that the pointer saved in the IRQ stack head points to
                 * something within the stack of the current task.
                 */
                if (!object_is_on_stack((void *)task_sp))
                        return 0;

                /*
                 * Follow the pointer to the task's kernel stack frame where
                 * the interrupted state was saved.
                 */
                regs = (struct pt_regs *)task_sp;
                pc = regs->cp0_epc;
                if (!user_mode(regs) && __kernel_text_address(pc)) {
                        *sp = regs->regs[29];
                        *ra = regs->regs[31];
                        return pc;
                }
                return 0;
        }
        if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
                return 0;
        /*
         * Return ra if an exception occurred at the first instruction
         */
        if (unlikely(ofs == 0)) {
                pc = *ra;
                *ra = 0;
                return pc;
        }

        info.func = (void *)(pc - ofs);
        info.func_size = ofs;   /* analyze from start to ofs */
        leaf = get_frame_info(&info);
        if (leaf < 0)
                return 0;

        if (*sp < low || *sp + info.frame_size > high)
                return 0;

        if (leaf)
                /*
                 * In some extreme cases, get_frame_info() can wrongly
                 * consider a nested function to be a leaf one. In those
                 * cases, avoid always returning the same value.
                 */
                pc = pc != *ra ? *ra : 0;
        else
                pc = ((unsigned long *)(*sp))[info.pc_offset];

        *sp += info.frame_size;
        *ra = 0;
        return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
                           unsigned long pc, unsigned long *ra)
{
        unsigned long stack_page = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                if (on_irq_stack(cpu, *sp)) {
                        stack_page = (unsigned long)irq_stack[cpu];
                        break;
                }
        }

        if (!stack_page)
                stack_page = (unsigned long)task_stack_page(task);

        return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 *
 * Return the address at which a blocked task is waiting, i.e. the first
 * saved PC outside the scheduler, or 0 if it cannot be determined.
 */
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long sp;
        unsigned long ra = 0;
#endif

        if (!task || task == current || task->state == TASK_RUNNING)
                goto out;
        if (!task_stack_page(task))
                goto out;

        pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
        sp = task->thread.reg29 + schedule_mfi.frame_size;

        while (in_sched_functions(pc))
                pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
        return pc;
}

unsigned long mips_stack_top(void)
{
        unsigned long top = TASK_SIZE & PAGE_MASK;

        /* One page for branch delay slot "emulation" */
        top -= PAGE_SIZE;

        /* Space for the VDSO, data page & GIC user page */
        top -= PAGE_ALIGN(current->thread.abi->vdso->size);
        top -= PAGE_SIZE;
        top -= mips_gic_present() ? PAGE_SIZE : 0;

        /* Space for cache colour alignment */
        if (cpu_has_dc_aliases)
                top -= shm_align_mask + 1;

        /* Space to randomize the VDSO base */
        if (current->flags & PF_RANDOMIZE)
                top -= VDSO_RANDOMIZE_SIZE;

        return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;

        return sp & ALMASK;
}

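/*
 * NMI-backtrace support: one call_single_data_t per CPU, plus a busy
 * mask so a csd is never reused before its target CPU has handled the
 * previous backtrace IPI.
 */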
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
        nmi_cpu_backtrace(get_irq_regs());
        cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static void raise_backtrace(cpumask_t *mask)
{
        call_single_data_t *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                /*
                 * If we previously sent an IPI to the target CPU & it hasn't
                 * cleared its bit in the busy cpumask then it didn't handle
                 * our previous IPI & it's not safe for us to reuse the
                 * call_single_data_t.
                 */
                if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
                        pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
                                cpu);
                        continue;
                }

                csd = &per_cpu(backtrace_csd, cpu);
                csd->func = handle_backtrace;
                smp_call_function_single_async(cpu, csd);
        }
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

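/*
 * Report the task's FP mode as a PR_FP_MODE_* bitmask; this backs
 * prctl(PR_GET_FP_MODE). mips_set_process_fp_mode() below backs
 * prctl(PR_SET_FP_MODE), e.g.
 * prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE).
 */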
int mips_get_process_fp_mode(struct task_struct *task)
{
        int value = 0;

        if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
                value |= PR_FP_MODE_FR;
        if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
                value |= PR_FP_MODE_FRE;

        return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
        /*
         * This is icky, but we use this to simply ensure that all CPUs have
         * context switched, regardless of whether they were previously running
         * kernel or user code. This ensures that no CPU that a mode-switching
         * program may execute on keeps its FPU enabled (& in the old mode)
         * throughout the mode switch.
         */
        return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
        const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
        struct task_struct *t;
        struct cpumask process_cpus;
        int cpu;

        /* If nothing to change, return right away, successfully.  */
        if (value == mips_get_process_fp_mode(task))
                return 0;

        /* Only accept a mode change if 64-bit FP enabled for o32.  */
        if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
                return -EOPNOTSUPP;

        /* And only for o32 tasks.  */
        if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
                return -EOPNOTSUPP;

        /* Check the value is valid */
        if (value & ~known_bits)
                return -EOPNOTSUPP;

        /* Setting FRE without FR is not supported.  */
        if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
                return -EOPNOTSUPP;

        /* Avoid inadvertently triggering emulation */
        if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
            !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
                return -EOPNOTSUPP;
        if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
                return -EOPNOTSUPP;

        /* FR = 0 not supported in MIPS R6 */
        if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
                return -EOPNOTSUPP;

        /* Indicate the new FP mode in each thread */
        for_each_thread(task, t) {
                /* Update desired FP register width */
                if (value & PR_FP_MODE_FR) {
                        clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
                } else {
                        set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
                        clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
                }

                /* Update desired FP single layout */
                if (value & PR_FP_MODE_FRE)
                        set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
                else
                        clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
        }

        /*
         * We need to ensure that all threads in the process have switched mode
         * before returning, in order to allow userland to not worry about
         * races. We can do this by forcing all CPUs that any thread in the
         * process may be running on to schedule something else - in this case
         * prepare_for_fp_mode_switch().
         *
         * We begin by generating a mask of all CPUs that any thread in the
         * process may be running on.
         */
        cpumask_clear(&process_cpus);
        for_each_thread(task, t)
                cpumask_set_cpu(task_cpu(t), &process_cpus);

        /*
         * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
         *
         * The CPUs may have rescheduled already since we switched mode or
         * generated the cpumask, but that doesn't matter. If a task in this
         * process is scheduled out then our scheduling
         * prepare_for_fp_mode_switch() will simply be redundant. If it's
         * scheduled in then it will already have picked up the new FP mode
         * whilst doing so.
         */
        get_online_cpus();
        for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
                work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
        put_online_cpus();

        return 0;
}

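/*
 * Dump a pt_regs into the flat MIPS32_EF_* register layout used by the
 * ptrace/regset code and ELF core dumps; a 64-bit variant follows.
 */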
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
        unsigned int i;

        for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
                /* k0/k1 are copied as zero. */
                if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
                        uregs[i] = 0;
                else
                        uregs[i] = regs->regs[i - MIPS32_EF_R0];
        }

        uregs[MIPS32_EF_LO] = regs->lo;
        uregs[MIPS32_EF_HI] = regs->hi;
        uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
        uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
        uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
        unsigned int i;

        for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
                /* k0/k1 are copied as zero. */
                if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
                        uregs[i] = 0;
                else
                        uregs[i] = regs->regs[i - MIPS64_EF_R0];
        }

        uregs[MIPS64_EF_LO] = regs->lo;
        uregs[MIPS64_EF_HI] = regs->hi;
        uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
        uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
        uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */