linux/arch/blackfin/kernel/process.c
/*
 * Blackfin architecture-dependent process handling
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm_types.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>
#include <asm/irq.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using an L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
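/*
 * With CONFIG_IDLE_L1 the idle handler is tagged l1_text, i.e. placed in
 * on-chip L1 instruction SRAM, so the idle loop can spin without fetching
 * instructions from external memory.
 */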
#ifdef CONFIG_IDLE_L1
void arch_cpu_idle(void) __attribute__((l1_text));
#endif

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
void arch_cpu_idle(void)
{
#ifdef CONFIG_IPIPE
        ipipe_suspend_domain();
#endif
        hard_local_irq_disable();
        if (!need_resched())
                idle_with_irq_disabled();

        hard_local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * Pass the data segment to user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        regs->pc = new_ip;
        if (current->mm)
                regs->p5 = current->mm->start_data;
#ifndef CONFIG_SMP
        task_thread_info(current)->l1_task_info.stack_start =
                (void *)current->mm->context.stack_start;
        task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
        memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
               sizeof(*L1_SCRATCH_TASK_INFO));
#endif
        wrusp(new_sp);
}
EXPORT_SYMBOL_GPL(start_thread);

void flush_thread(void)
{
}

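/*
 * sys_clone entry point.  On __ARCH_SYNC_CORE_DCACHE parts (no hardware
 * cache coherency between cores) the parent is pinned to its current CPU
 * first, unless userspace has already restricted its affinity.  The 12
 * bytes carved off the top of the new stack presumably leave room for the
 * initial frame the child expects on its user stack.
 */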
asmlinkage int bfin_clone(unsigned long clone_flags, unsigned long newsp)
{
#ifdef __ARCH_SYNC_CORE_DCACHE
        if (current->nr_cpus_allowed == num_possible_cpus())
                set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif
        if (newsp)
                newsp -= 12;
        return do_fork(clone_flags, newsp, 0, NULL, NULL);
}

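/*
 * Set up the context-switch state of a new task.  The two words reserved
 * directly below the child's pt_regs (v[0]/v[1]) sit at the top of the
 * saved kernel stack where ret_from_fork picks them up: for a kernel
 * thread they carry the thread function and its argument (passed in via
 * usp/topstk), for a user fork they are simply zeroed.
 */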
int
copy_thread(unsigned long clone_flags,
            unsigned long usp, unsigned long topstk,
            struct task_struct *p)
{
        struct pt_regs *childregs;
        unsigned long *v;

        childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
        v = ((unsigned long *)childregs) - 2;
        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childregs, 0, sizeof(struct pt_regs));
                v[0] = usp;
                v[1] = topstk;
                childregs->orig_p0 = -1;
                childregs->ipend = 0x8000;
                __asm__ __volatile__("%0 = syscfg;":"=da"(childregs->syscfg):);
                p->thread.usp = 0;
        } else {
                *childregs = *current_pt_regs();
                childregs->r0 = 0;
                p->thread.usp = usp ? : rdusp();
                v[0] = v[1] = 0;
        }

        p->thread.ksp = (unsigned long)v;
        p->thread.pc = (unsigned long)ret_from_fork;

        return 0;
}

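/*
 * Walk the saved frame pointers of a sleeping task and return the first
 * PC that is outside the scheduler.  The upper bound of 8184 is
 * presumably THREAD_SIZE (8 KiB) minus the 8 bytes of a minimal frame,
 * keeping the two-word dereference below within the stack area.
 */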
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, pc;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)p;
        fp = p->thread.usp;
        do {
                if (fp < stack_page + sizeof(struct thread_info) ||
                    fp >= 8184 + stack_page)
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (!in_sched_functions(pc))
                        return pc;
                fp = *(unsigned long *)fp;
        }
        while (count++ < 16);
        return 0;
}

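/*
 * Userspace atomic ops on Blackfin run through fixed code sequences at
 * well-known addresses (see asm/fixed_code.h).  If an exception or
 * interrupt lands part-way through one of them, this helper completes the
 * operation on the task's behalf, keyed off the saved PC, so the sequence
 * still appears atomic to userspace.
 */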
void finish_atomic_sections (struct pt_regs *regs)
{
        int __user *up0 = (int __user *)regs->p0;

        switch (regs->pc) {
        default:
                /* not in middle of an atomic step, so resume like normal */
                return;

        case ATOMIC_XCHG32 + 2:
                put_user(regs->r1, up0);
                break;

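        /*
         * Note the case labels nested inside the if below: resuming at
         * CAS32 + 2 or + 4 performs the store only when the comparison
         * matched, while resuming at CAS32 + 6 (already past the compare)
         * always performs it.
         */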
        case ATOMIC_CAS32 + 2:
        case ATOMIC_CAS32 + 4:
                if (regs->r0 == regs->r1)
        case ATOMIC_CAS32 + 6:
                        put_user(regs->r2, up0);
                break;

        case ATOMIC_ADD32 + 2:
                regs->r0 = regs->r1 + regs->r0;
                /* fall through */
        case ATOMIC_ADD32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_SUB32 + 2:
                regs->r0 = regs->r1 - regs->r0;
                /* fall through */
        case ATOMIC_SUB32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_IOR32 + 2:
                regs->r0 = regs->r1 | regs->r0;
                /* fall through */
        case ATOMIC_IOR32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_AND32 + 2:
                regs->r0 = regs->r1 & regs->r0;
                /* fall through */
        case ATOMIC_AND32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_XOR32 + 2:
                regs->r0 = regs->r1 ^ regs->r0;
                /* fall through */
        case ATOMIC_XOR32 + 4:
                put_user(regs->r0, up0);
                break;
        }

        /*
         * We've finished the atomic section, and the only thing left for
         * userspace is to do an RTS, so we might as well handle that too
         * since we need to update the PC anyway.
         */
        regs->pc = regs->rets;
}

static inline
int in_mem(unsigned long addr, unsigned long size,
           unsigned long start, unsigned long end)
{
        return addr >= start && addr + size <= end;
}
static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
                     unsigned long const_addr, unsigned long const_size)
{
        return const_size &&
               in_mem(addr, size, const_addr + off, const_addr + const_size);
}
static inline
int in_mem_const(unsigned long addr, unsigned long size,
                 unsigned long const_addr, unsigned long const_size)
{
        return in_mem_const_off(addr, size, 0, const_addr, const_size);
}
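
/*
 * ASYNC_ENABLED(bank, ctl) guesses whether an async memory bank can be
 * read safely.  On BF60x every bank is assumed usable.  Elsewhere the
 * AMBEN field of EBIU_AMGCTL must enable at least (bank + 1) banks, and
 * the bank must not have its ARDY handshake enabled in EBIU_AMBCTL0/1,
 * since waiting on an unconnected ARDY pin could hang the access.
 */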
#ifdef CONFIG_BF60x
#define ASYNC_ENABLED(bnum, bctlnum)    1
#else
#define ASYNC_ENABLED(bnum, bctlnum) \
({ \
        (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
        bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
        1; \
})
#endif
/*
 * We can't read EBIU banks that aren't enabled or we end up hanging
 * on the access to the async space.  Make sure we validate accesses
 * that cross async banks too.
 *      0 - found, but unusable
 *      1 - found & usable
 *      2 - not found
 */
static
int in_async(unsigned long addr, unsigned long size)
{
        if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
                if (!ASYNC_ENABLED(0, 0))
                        return 0;
                if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
                        return 1;
                size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
                addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
        }
        if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
                if (!ASYNC_ENABLED(1, 0))
                        return 0;
                if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
                        return 1;
                size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
                addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
        }
        if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
                if (!ASYNC_ENABLED(2, 1))
                        return 0;
                if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
                        return 1;
                size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
                addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
        }
        if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
                if (!ASYNC_ENABLED(3, 1))
                        return 0;
                if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
                        return 1;
                return 0;
        }

        /* not within async bounds */
        return 2;
}

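/*
 * Classify how a range of memory may safely be accessed.  Rough contract
 * (the authoritative meanings live with the BFIN_MEM_ACCESS_* values):
 * CORE for plain core loads/stores, CORE_ONLY when only this core may
 * touch it, DMA/IDMA/ITEST when an alternate engine is needed (e.g. for
 * another core's L1 memory), and -EFAULT when the range is not usable.
 */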
int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
        int cpu = raw_smp_processor_id();

        /* Check that things do not wrap around */
        if (addr > ULONG_MAX - size)
                return -EFAULT;

        if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
                return BFIN_MEM_ACCESS_CORE;

        if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
        if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#ifdef COREB_L1_CODE_START
        if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
        if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#endif
        if (in_mem_const(addr, size, L2_START, L2_LENGTH))
                return BFIN_MEM_ACCESS_CORE;

        if (addr >= SYSMMR_BASE)
                return BFIN_MEM_ACCESS_CORE_ONLY;

        switch (in_async(addr, size)) {
        case 0: return -EFAULT;
        case 1: return BFIN_MEM_ACCESS_CORE;
        case 2: /* fall through */;
        }

        if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
                return BFIN_MEM_ACCESS_CORE;
        if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
                return BFIN_MEM_ACCESS_DMA;

        return -EFAULT;
}

#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
#endif
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
        int aret;

        if (size == 0)
                return 1;
        /* Check that things do not wrap around */
        if (addr > ULONG_MAX - size)
                return 0;
        if (uaccess_kernel())
                return 1;
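        /*
         * The RAM layout depends on CONFIG_MTD_UCLINUX: with an MTD-mapped
         * uClinux image the ROMFS occupies [memory_mtd_start, memory_mtd_end),
         * so user accesses are checked against the RAM on either side of it
         * and, for XIP with CONFIG_ROMFS_ON_MTD, against the ROMFS itself.
         * The if (1) / if (0) construct below keeps both branches compiling;
         * the dead one is optimized away.
         */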
#ifdef CONFIG_MTD_UCLINUX
        if (1)
#else
        if (0)
#endif
        {
                if (in_mem(addr, size, memory_start, memory_end))
                        return 1;
                if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
                        return 1;
# ifndef CONFIG_ROMFS_ON_MTD
                if (0)
# endif
                        /* For XIP, allow user space to use pointers within the ROMFS.  */
                        if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
                                return 1;
        } else {
                if (in_mem(addr, size, memory_start, physical_mem_end))
                        return 1;
        }

        if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
                return 1;

        if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
                return 1;
#ifdef COREB_L1_CODE_START
        if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
                return 1;
#endif

#ifndef CONFIG_EXCEPTION_L1_SCRATCH
        if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
                return 1;
#endif

        aret = in_async(addr, size);
        if (aret < 2)
                return aret;

        if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
                return 1;

        if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
                return 1;
        if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
                return 1;

        return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */