/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H

/*
 * On tile, the "tp" register is dedicated to holding the current
 * cpu's per-cpu offset; bind a global register variable to it so
 * __my_cpu_offset and set_my_cpu_offset() below can read/write it
 * directly with no memory traffic.
 */
register unsigned long my_cpu_offset_reg asm("tp");

#ifdef CONFIG_PREEMPT
/*
 * For full preemption, we can't just use the register variable
 * directly, since we need barrier() to hazard against it, causing the
 * compiler to reload anything computed from a previous "tp" value.
 * But we also don't want to use volatile asm, since we'd like the
 * compiler to be able to cache the value across multiple percpu reads.
 * So we use a fake stack read as a hazard against barrier().
 * The 'U' constraint is like 'm' but disallows postincrement.
 */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long tp;
	/* The dummy "*sp" input makes this asm clobberable by barrier()
	 * (which has a "memory" clobber) without marking it volatile. */
	register unsigned long *sp asm("sp");
	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
	return tp;
}
#define __my_cpu_offset __my_cpu_offset()
#else
/*
 * We don't need to hazard against barrier() since "tp" doesn't ever
 * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
 * changes at function call points, at which we are already re-reading
 * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
 */
#define __my_cpu_offset my_cpu_offset_reg
#endif

/* Install this cpu's per-cpu offset by writing the "tp" register. */
#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))

#include <asm-generic/percpu.h>

#endif /* _ASM_TILE_PERCPU_H */