/* linux/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h */
/*
 * SMP/VPE-safe functions to access "registers" (see note).
 *
 * NOTES:
 * - These macros use ll/sc instructions, so it is your responsibility to
 * ensure these are available on your platform before including this file.
 * - The MIPS32 spec states that ll/sc results are undefined for uncached
 * accesses. This means they can't be used on HW registers accessed
 * through kseg1. Code which requires these macros for this purpose must
 * front-end the registers with cached memory "registers" and have a single
 * thread update the actual HW registers.
 * - A maximum of 2k of code can be inserted between ll and sc. Every
 * memory accesses between the instructions will increase the chance of
 * sc failing and having to loop.
 * - When using custom_read_reg32/custom_write_reg32 only perform the
 * necessary logical operations on the register value in between these
 * two calls. All other logic should be performed before the first call.
 * - There is a bug on the R10000 chips which has a workaround. If you
 * are affected by this bug, make sure to define the symbol 'R10000_LLSC_WAR'
 * to be non-zero.  If you are using this header from within linux, you may
 * include <asm/war.h> before including this file to have this defined
 * appropriately for you.
 *
 * Copyright 2005-2007 PMC-Sierra, Inc.
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 *  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF USE,
 *  DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc., 675
 *  Mass Ave, Cambridge, MA 02139, USA.
 */
  46
#ifndef __ASM_REGOPS_H__
#define __ASM_REGOPS_H__

#include <linux/types.h>

#include <asm/compiler.h>
#include <asm/war.h>

/*
 * Default to "not affected" when the platform (or a prior include of
 * <asm/war.h>) did not define the R10000 ll/sc erratum symbol.
 */
#ifndef R10000_LLSC_WAR
#define R10000_LLSC_WAR 0
#endif

/*
 * On affected R10000 silicon, the sc-retry branch must use the
 * branch-likely form (beqzl) as the erratum workaround; otherwise a
 * plain beqz is sufficient.  The trailing spaces keep the asm string
 * concatenation aligned with the surrounding tab layout.
 */
#if R10000_LLSC_WAR == 1
#define __beqz	"beqzl  "
#else
#define __beqz	"beqz   "
#endif

/*
 * Fallback when this header is used outside the kernel and
 * <linux/types.h> did not actually provide u32.
 */
#ifndef _LINUX_TYPES_H
typedef unsigned int u32;
#endif
  68
/*
 * set_value_reg32 - atomically set the masked bits of *addr to the
 * corresponding bits of @value.
 *
 * Implemented as an ll/sc loop: load-linked the word, clear the bits
 * selected by @mask, OR in the value bits, then store-conditional and
 * retry if another CPU/VPE modified the word in between.
 *
 * NOTE(review): the asm ORs in @value unmasked, so bits of @value
 * outside @mask would also be set — callers presumably pass
 * (value & mask); confirm at call sites.
 */
static inline void set_value_reg32(volatile u32 *const addr,
					u32 const mask,
					u32 const value)
{
	u32 temp;	/* scratch; doubles as the sc success flag */

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	arch=r4000			\n"
	"1:	ll	%0, %1	# set_value_reg32	\n"	/* load-linked *addr */
	"	and	%0, %2				\n"	/* %2 is ~mask: clear masked bits */
	"	or	%0, %3				\n"	/* merge in the new value bits */
	"	sc	%0, %1				\n"	/* store-conditional; %0 <- 0 on failure */
	"	"__beqz"%0, 1b				\n"	/* retry if sc failed */
	"	nop					\n"	/* branch delay slot */
	"	.set	pop				\n"
	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
	: "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
}
  91
/*
 * set_reg32 - atomically set all the bits of *addr selected by @mask
 * to '1'; other bits are unchanged.
 *
 * ll/sc loop: load-linked, OR in the mask, store-conditional, retry on
 * contention from another CPU/VPE.
 */
static inline void set_reg32(volatile u32 *const addr,
				u32 const mask)
{
	u32 temp;	/* scratch; doubles as the sc success flag */

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	arch=r4000			\n"
	"1:	ll	%0, %1		# set_reg32	\n"	/* load-linked *addr */
	"	or	%0, %2				\n"	/* set the masked bits */
	"	sc	%0, %1				\n"	/* store-conditional; %0 <- 0 on failure */
	"	"__beqz"%0, 1b				\n"	/* retry if sc failed */
	"	nop					\n"	/* branch delay slot */
	"	.set	pop				\n"
	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
	: "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
}
 112
/*
 * clear_reg32 - atomically clear all the bits of *addr selected by
 * @mask (set them to '0'); other bits are unchanged.
 *
 * ll/sc loop: load-linked, AND with ~mask, store-conditional, retry on
 * contention from another CPU/VPE.
 */
static inline void clear_reg32(volatile u32 *const addr,
				u32 const mask)
{
	u32 temp;	/* scratch; doubles as the sc success flag */

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	arch=r4000			\n"
	"1:	ll	%0, %1		# clear_reg32	\n"	/* load-linked *addr */
	"	and	%0, %2				\n"	/* %2 is ~mask: clear the masked bits */
	"	sc	%0, %1				\n"	/* store-conditional; %0 <- 0 on failure */
	"	"__beqz"%0, 1b				\n"	/* retry if sc failed */
	"	nop					\n"	/* branch delay slot */
	"	.set	pop				\n"
	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
	: "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
}
 133
/*
 * toggle_reg32 - atomically flip all the bits of *addr selected by
 * @mask ('0' -> '1' and '1' -> '0'); other bits are unchanged.
 *
 * ll/sc loop: load-linked, XOR with the mask, store-conditional, retry
 * on contention from another CPU/VPE.
 */
static inline void toggle_reg32(volatile u32 *const addr,
				u32 const mask)
{
	u32 temp;	/* scratch; doubles as the sc success flag */

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	arch=r4000			\n"
	"1:	ll	%0, %1		# toggle_reg32	\n"	/* load-linked *addr */
	"	xor	%0, %2				\n"	/* invert the masked bits */
	"	sc	%0, %1				\n"	/* store-conditional; %0 <- 0 on failure */
	"	"__beqz"%0, 1b				\n"	/* retry if sc failed */
	"	nop					\n"	/* branch delay slot */
	"	.set	pop				\n"
	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
	: "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
}
 154
/*
 * read_reg32 - read *addr and return only the bits selected by @mask;
 * all other bits of the return value are '0'.
 *
 * Plain (non-atomic) read: a single lw followed by an and.  No ll/sc
 * loop is needed since nothing is written back.
 */
static inline u32 read_reg32(volatile u32 *const addr,
				u32 const mask)
{
	u32 temp;	/* holds the loaded, masked value */

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	noreorder			\n"
	"	lw	%0, %1		# read		\n"
	"	and	%0, %2		# mask		\n"
	"	.set	pop				\n"
	: "=&r" (temp)
	: "m" (*addr), "ir" (mask));

	return temp;
}
 174
/*
 * blocking_read_reg32 - Read address with blocking load
 *
 * Uncached writes need to be read back to ensure they reach RAM.
 * The returned value must be 'used' to prevent from becoming a
 * non-blocking load.
 *
 * The dummy "move %0, %0" consumes the loaded value inside the asm so
 * the CPU must wait for the lw to complete (i.e. the load blocks).
 */
static inline u32 blocking_read_reg32(volatile u32 *const addr)
{
	u32 temp;	/* holds the value read back from *addr */

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	noreorder			\n"
	"	lw	%0, %1		# read		\n"
	"	move	%0, %0		# block		\n"	/* force use of the load result */
	"	.set	pop				\n"
	: "=&r" (temp)
	: "m" (*addr));

	return temp;
}
 197
/*
 * For special strange cases only:
 *
 * If you need custom processing within a ll/sc loop, use the following macros
 * VERY CAREFULLY:
 *
 *   u32 tmp;                           <-- Define a variable to hold the data
 *
 *   custom_read_reg32(address, tmp);   <-- Reads the address and put the value
 *                                              in the 'tmp' variable given
 *
 *      From here on out, you are (basically) atomic, so don't do anything too
 *      fancy!
 *      Also, this code may loop if the end of this block fails to write
 *      everything back safely due to the other CPU, so do NOT do anything
 *      with side-effects!
 *
 *   custom_write_reg32(address, tmp);  <-- Writes back 'tmp' safely.
 */
/*
 * custom_read_reg32 - open the ll half of an ll/sc critical section.
 * Loads *address (load-linked) into @tmp and defines local label "1:"
 * that custom_write_reg32's retry branch jumps back to.  Must be paired
 * with custom_write_reg32 in the same function.
 */
#define custom_read_reg32(address, tmp)				\
	__asm__ __volatile__(					\
	"	.set	push				\n"	\
	"	.set	arch=r4000			\n"	\
	"1:	ll	%0, %1	#custom_read_reg32	\n"	\
	"	.set	pop				\n"	\
	: "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)	\
	: GCC_OFF_SMALL_ASM() (*address))
 225
/*
 * custom_write_reg32 - close the sc half of an ll/sc critical section.
 * Store-conditionals @tmp back to *address; on failure it branches back
 * to the "1:" label emitted by the preceding custom_read_reg32, which
 * re-runs the whole read-modify-write block.  The "0" (tmp) input ties
 * the operand to the value computed between the two macros.
 */
#define custom_write_reg32(address, tmp)			\
	__asm__ __volatile__(					\
	"	.set	push				\n"	\
	"	.set	arch=r4000			\n"	\
	"	sc	%0, %1	#custom_write_reg32	\n"	\
	"	"__beqz"%0, 1b				\n"	\
	"	nop					\n"	\
	"	.set	pop				\n"	\
	: "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)	\
	: "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
 236
#endif	/* __ASM_REGOPS_H__ */