linux/arch/tile/lib/atomic_32.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/futex.h>
#include <arch/chip.h>

/* See <asm/atomic_32.h> */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/*
 * A block of memory containing locks for atomic ops. Each instance of this
 * struct will be homed on a different CPU.
 */
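/*
 * Note: the structure below is aligned to its own size, presumably so
 * that each per-cpu copy occupies its own set of cache lines and can
 * be homed on its CPU without sharing lines with neighboring data.
 */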
struct atomic_locks_on_cpu {
	int lock[ATOMIC_HASH_L2_SIZE];
} __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4)));

static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);

/* The locks we'll use until __init_atomic_per_cpu is called. */
static struct atomic_locks_on_cpu __initdata initial_atomic_locks;

/* Hash into this vector to get a pointer to lock for the given atomic. */
struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
	__write_once = {
	[0 ... ATOMIC_HASH_L1_SIZE-1] = &initial_atomic_locks
};

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

static inline int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	unsigned long i =
		(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
	unsigned long n = __insn_crc32_32(0, i);

	/* Grab high bits for L1 index. */
	unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
	/* Grab low bits for L2 index. */
	unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);

	return &atomic_lock_ptr[l1_index]->lock[l2_index];
#else
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
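	/*
	 * That is (assuming the usual masked-merge reading of the mm
	 * instruction): splice bits [2, ATOMIC_HASH_SHIFT + 2) of
	 * (v >> 1) into the page-aligned atomic_locks base, which
	 * comes out to roughly
	 * &atomic_locks[((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1)].
	 */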
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
#endif
}

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	int i;
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		if (p >= &atomic_lock_ptr[i]->lock[0] &&
		    p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
			return 1;
		}
	}
	return 0;
#else
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
#endif
}

void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

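/*
 * Kernel-facing 32-bit atomics: each wrapper pairs the value with its
 * hashed lock via __atomic_setup() and defers to the corresponding
 * low-level __atomic_xxx() routine, returning just the .val field of
 * the result to the caller.
 */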
int _atomic_xchg(atomic_t *v, int n)
{
	return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(atomic_t *v, int i)
{
	return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
		.val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);

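/*
 * The 64-bit variants follow the same pattern; here the low-level
 * __atomic64_xxx() helpers hand the 64-bit result back directly
 * rather than through the .val field of a struct __get_user.
 */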
u64 _atomic64_xchg(atomic64_t *v, u64 n)
{
	return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
{
	return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
					  u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);


static inline int *__futex_setup(int __user *v)
{
	/*
	 * Issue a prefetch to the counter to bring it into cache.
	 * As for __atomic_setup, but we can't do a read into the L1
	 * since it might fault; instead we do a prefetch into the L2.
	 */
	__insn_prefetch(v);
	return __atomic_hashed_lock((int __force *)v);
}

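/*
 * Futex handlers: each applies its operation to a word in user memory
 * under the hashed lock.  A fault ends up reported through the .err
 * field of the returned struct __get_user (see __atomic_bad_address()
 * below).
 */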
struct __get_user futex_set(u32 __user *v, int i)
{
	return __atomic_xchg((int __force *)v, __futex_setup(v), i);
}

struct __get_user futex_add(u32 __user *v, int n)
{
	return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_or(u32 __user *v, int n)
{
	return __atomic_or((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_andn(u32 __user *v, int n)
{
	return __atomic_andn((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_xor(u32 __user *v, int n)
{
	return __atomic_xor((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
{
	return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
}

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}


#if CHIP_HAS_CBOX_HOME_MAP()
static int __init noatomichash(char *str)
{
	pr_warning("noatomichash is deprecated.\n");
	return 1;
}
__setup("noatomichash", noatomichash);
#endif

void __init __init_atomic_per_cpu(void)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

	unsigned int i;
	int actual_cpu;

	/*
	 * Before this is called from setup, we just have one lock for
	 * all atomic objects/operations.  Here we replace the
	 * elements of atomic_lock_ptr so that they point at per_cpu
	 * integers.  This seemingly over-complex approach stems from
	 * the fact that DEFINE_PER_CPU defines an entry for each cpu
	 * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1.  But
	 * for efficient hashing of atomics to their locks we want a
	 * compile time constant power of 2 for the size of this
	 * table, so we use ATOMIC_HASH_SIZE.
	 *
	 * Here we populate atomic_lock_ptr from the per cpu
	 * atomic_lock_pool, interspersing by actual cpu so that
	 * subsequent elements are homed on consecutive cpus.
	 */

	actual_cpu = cpumask_first(cpu_possible_mask);

	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		/*
		 * Preincrement to slightly bias against using cpu 0,
		 * which has plenty of stuff homed on it already.
		 */
		actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
		if (actual_cpu >= nr_cpu_ids)
			actual_cpu = cpumask_first(cpu_possible_mask);

		atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
	}

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
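	/* (A page has PAGE_SIZE >> 3 distinct 8-byte-aligned offsets.) */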
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* The futex code makes this assumption, so we validate it here. */
	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}