linux/arch/blackfin/kernel/process.c
/*
 * Blackfin architecture-dependent process handling
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>
#include <asm/irq.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using an L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
void arch_cpu_idle(void) __attribute__((l1_text));
#endif

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
void arch_cpu_idle(void)
{
#ifdef CONFIG_IPIPE
        ipipe_suspend_domain();
#endif
        hard_local_irq_disable();
        if (!need_resched())
                idle_with_irq_disabled();

        hard_local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * Pass the data segment into user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        regs->pc = new_ip;
        if (current->mm)
                regs->p5 = current->mm->start_data;
#ifndef CONFIG_SMP
        task_thread_info(current)->l1_task_info.stack_start =
                (void *)current->mm->context.stack_start;
        task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
        memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
               sizeof(*L1_SCRATCH_TASK_INFO));
#endif
        wrusp(new_sp);
}
EXPORT_SYMBOL_GPL(start_thread);

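/*
 * Nothing architecture-specific needs to be torn down when a thread
 * execs, so flush_thread() is a no-op on Blackfin.
 */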
void flush_thread(void)
{
}

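/*
 * sys_clone entry point.  When the cores cannot keep their data caches
 * coherent in hardware (__ARCH_SYNC_CORE_DCACHE), a task that is still
 * allowed on every CPU is pinned to the current one before forking,
 * presumably so parent and child keep a consistent view of memory.
 */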
asmlinkage int bfin_clone(unsigned long clone_flags, unsigned long newsp)
{
#ifdef __ARCH_SYNC_CORE_DCACHE
        if (current->nr_cpus_allowed == num_possible_cpus())
                set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif
        if (newsp)
                newsp -= 12;
        return do_fork(clone_flags, newsp, 0, NULL, NULL);
}

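/*
 * Set up the kernel stack and thread state for a newly copied task.
 * Kernel threads get a zeroed pt_regs with the thread function and its
 * argument stashed just below it; user-space children get a copy of the
 * parent's registers with r0 cleared so fork() returns 0 in the child.
 */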
int
copy_thread(unsigned long clone_flags,
            unsigned long usp, unsigned long topstk,
            struct task_struct *p)
{
        struct pt_regs *childregs;
        unsigned long *v;

        childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
        v = ((unsigned long *)childregs) - 2;
        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childregs, 0, sizeof(struct pt_regs));
                v[0] = usp;
                v[1] = topstk;
                childregs->orig_p0 = -1;
                childregs->ipend = 0x8000;
                __asm__ __volatile__("%0 = syscfg;":"=da"(childregs->syscfg):);
                p->thread.usp = 0;
        } else {
                *childregs = *current_pt_regs();
                childregs->r0 = 0;
                p->thread.usp = usp ? : rdusp();
                v[0] = v[1] = 0;
        }

        p->thread.ksp = (unsigned long)v;
        p->thread.pc = (unsigned long)ret_from_fork;

        return 0;
}

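/*
 * Walk the frame-pointer chain of a sleeping task and return the first
 * PC that is outside the scheduler, i.e. where the task is blocked.
 */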
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, pc;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)p;
        fp = p->thread.usp;
        do {
                if (fp < stack_page + sizeof(struct thread_info) ||
                    fp >= 8184 + stack_page)
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (!in_sched_functions(pc))
                        return pc;
                fp = *(unsigned long *)fp;
        }
        while (count++ < 16);
        return 0;
}

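/*
 * User space performs its atomic operations by jumping into the fixed
 * code sequences defined in <asm/fixed_code.h>.  If a task was
 * interrupted part way through one of those sequences, complete the
 * operation on its behalf here so the whole sequence appears atomic.
 */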
void finish_atomic_sections (struct pt_regs *regs)
{
        int __user *up0 = (int __user *)regs->p0;

        switch (regs->pc) {
        default:
                /* not in middle of an atomic step, so resume like normal */
                return;

        case ATOMIC_XCHG32 + 2:
                put_user(regs->r1, up0);
                break;

        case ATOMIC_CAS32 + 2:
        case ATOMIC_CAS32 + 4:
                if (regs->r0 == regs->r1)
        case ATOMIC_CAS32 + 6:
                        put_user(regs->r2, up0);
                break;

        case ATOMIC_ADD32 + 2:
                regs->r0 = regs->r1 + regs->r0;
                /* fall through */
        case ATOMIC_ADD32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_SUB32 + 2:
                regs->r0 = regs->r1 - regs->r0;
                /* fall through */
        case ATOMIC_SUB32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_IOR32 + 2:
                regs->r0 = regs->r1 | regs->r0;
                /* fall through */
        case ATOMIC_IOR32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_AND32 + 2:
                regs->r0 = regs->r1 & regs->r0;
                /* fall through */
        case ATOMIC_AND32 + 4:
                put_user(regs->r0, up0);
                break;

        case ATOMIC_XOR32 + 2:
                regs->r0 = regs->r1 ^ regs->r0;
                /* fall through */
        case ATOMIC_XOR32 + 4:
                put_user(regs->r0, up0);
                break;
        }

        /*
         * We've finished the atomic section, and the only thing left for
         * userspace is to do an RTS, so we might as well handle that too
         * since we need to update the PC anyway.
         */
        regs->pc = regs->rets;
}

static inline
int in_mem(unsigned long addr, unsigned long size,
           unsigned long start, unsigned long end)
{
        return addr >= start && addr + size <= end;
}
static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
                     unsigned long const_addr, unsigned long const_size)
{
        return const_size &&
               in_mem(addr, size, const_addr + off, const_addr + const_size);
}
static inline
int in_mem_const(unsigned long addr, unsigned long size,
                 unsigned long const_addr, unsigned long const_size)
{
        return in_mem_const_off(addr, size, 0, const_addr, const_size);
}
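/*
 * Is async bank "bnum" usable?  On BF60x the banks are always treated as
 * enabled; on older parts, check that AMGCTL actually enables the bank
 * and that the bank is not gated on ARDY (which could stall forever).
 */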
#ifdef CONFIG_BF60x
#define ASYNC_ENABLED(bnum, bctlnum)    1
#else
#define ASYNC_ENABLED(bnum, bctlnum) \
({ \
        (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
        bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
        1; \
})
#endif
/*
 * We can't read EBIU banks that aren't enabled or we end up hanging
 * on the access to the async space.  Make sure we validate accesses
 * that cross async banks too.
 *      0 - found, but unusable
 *      1 - found & usable
 *      2 - not found
 */
static
int in_async(unsigned long addr, unsigned long size)
{
        if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
                if (!ASYNC_ENABLED(0, 0))
                        return 0;
                if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
                        return 1;
                size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
                addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
        }
        if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
                if (!ASYNC_ENABLED(1, 0))
                        return 0;
                if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
                        return 1;
                size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
                addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
        }
        if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
                if (!ASYNC_ENABLED(2, 1))
                        return 0;
                if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
                        return 1;
                size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
                addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
        }
        if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
                if (!ASYNC_ENABLED(3, 1))
                        return 0;
                if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
                        return 1;
                return 0;
        }

        /* not within async bounds */
        return 2;
}

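/*
 * Classify how a range of memory may safely be accessed (by the core,
 * by DMA, via the ITEST mechanism, ...), so callers such as the ptrace
 * code can pick an access method.  Returns a BFIN_MEM_ACCESS_* value,
 * or -EFAULT if the range cannot be accessed at all.
 */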
int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
        int cpu = raw_smp_processor_id();

        /* Check that things do not wrap around */
        if (addr > ULONG_MAX - size)
                return -EFAULT;

        if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
                return BFIN_MEM_ACCESS_CORE;

        if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
        if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
                return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#ifdef COREB_L1_CODE_START
        if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
        if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
        if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
                return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#endif
        if (in_mem_const(addr, size, L2_START, L2_LENGTH))
                return BFIN_MEM_ACCESS_CORE;

        if (addr >= SYSMMR_BASE)
                return BFIN_MEM_ACCESS_CORE_ONLY;

        switch (in_async(addr, size)) {
        case 0: return -EFAULT;
        case 1: return BFIN_MEM_ACCESS_CORE;
        case 2: /* fall through */;
        }

        if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
                return BFIN_MEM_ACCESS_CORE;
        if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
                return BFIN_MEM_ACCESS_DMA;

        return -EFAULT;
}

 354
 355#if defined(CONFIG_ACCESS_CHECK)
 356#ifdef CONFIG_ACCESS_OK_L1
 357__attribute__((l1_text))
 358#endif
 359/* Return 1 if access to memory range is OK, 0 otherwise */
 360int _access_ok(unsigned long addr, unsigned long size)
 361{
 362        int aret;
 363
 364        if (size == 0)
 365                return 1;
 366        /* Check that things do not wrap around */
 367        if (addr > ULONG_MAX - size)
 368                return 0;
 369        if (segment_eq(get_fs(), KERNEL_DS))
 370                return 1;
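        /*
         * The if (1) / if (0) construct below (instead of wrapping the
         * whole block in #ifdef) keeps both branches visible to the
         * compiler; only the CONFIG_MTD_UCLINUX case has to treat the
         * filesystem image embedded in RAM specially.
         */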
#ifdef CONFIG_MTD_UCLINUX
        if (1)
#else
        if (0)
#endif
        {
                if (in_mem(addr, size, memory_start, memory_end))
                        return 1;
                if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
                        return 1;
# ifndef CONFIG_ROMFS_ON_MTD
                if (0)
# endif
                        /* For XIP, allow user space to use pointers within the ROMFS.  */
                        if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
                                return 1;
        } else {
                if (in_mem(addr, size, memory_start, physical_mem_end))
                        return 1;
        }

        if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
                return 1;

        if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
                return 1;
        if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
                return 1;
#ifdef COREB_L1_CODE_START
        if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
                return 1;
        if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
                return 1;
#endif

#ifndef CONFIG_EXCEPTION_L1_SCRATCH
        if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
                return 1;
#endif

        aret = in_async(addr, size);
        if (aret < 2)
                return aret;

        if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
                return 1;

        if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
                return 1;
        if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
                return 1;

        return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */