linux/include/asm-alpha/system.h
#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <asm/pal.h>
#include <asm/page.h>
#include <asm/barrier.h>

/*
 * System defines.. Note that this is included both from .c and .S
 * files, so it only contains defines, not any C code.
 */

/*
 * We leave one page for the initial stack page, and one page for
 * the initial process structure. Also, the console eats 3 MB for
 * the initial bootloader (one of which we can reclaim later).
 */
#define BOOT_PCB        0x20000000
#define BOOT_ADDR       0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE       (16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS       0x300000 /* Old bootloaders hardcoded this.  */
#else
#define KERNEL_START_PHYS       0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START    (PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD     KERNEL_START
#define INIT_STACK      (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT       (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE       (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE        (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR      (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
 * This is set up by the secondary bootstrap loader.  Because
 * the zero page is zeroed out as soon as the vm system is
 * initialized, we need to copy things out into a more permanent
 * place.
 */
#define PARAM                   ZERO_PGE
#define COMMAND_LINE            ((char*)(PARAM + 0x0000))
#define INITRD_START            (*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE             (*(unsigned long *) (PARAM+0x108))
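
/*
 * Illustrative sketch (not part of this header) of how early setup
 * code might consume this parameter block before the zero page is
 * recycled; the destination variables here are hypothetical:
 *
 *	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
 *	initrd_start = INITRD_START;
 *	initrd_end   = initrd_start + INITRD_SIZE;
 */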

#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */

/*
 * This is the logout header that should be common to all platforms
 * (assuming they are running OSF/1 PALcode, I guess).
 */
struct el_common {
        unsigned int    size;           /* size in bytes of logout area */
        unsigned int    sbz1    : 30;   /* should be zero */
        unsigned int    err2    :  1;   /* second error */
        unsigned int    retry   :  1;   /* retry flag */
        unsigned int    proc_offset;    /* processor-specific offset */
        unsigned int    sys_offset;     /* system-specific offset */
        unsigned int    code;           /* machine check code */
        unsigned int    frame_rev;      /* frame revision */
};

/* Machine Check Frame for uncorrectable errors (Large format)
 *      --- This is used to log uncorrectable errors such as
 *          double bit ECC errors.
 *      --- These errors are detected by both the processor and the system.
 */
struct el_common_EV5_uncorrectable_mcheck {
        unsigned long   shadow[8];        /* Shadow reg. 8-14, 25           */
        unsigned long   paltemp[24];      /* PAL TEMP REGS.                 */
        unsigned long   exc_addr;         /* Address of excepting instruction*/
        unsigned long   exc_sum;          /* Summary of arithmetic traps.   */
        unsigned long   exc_mask;         /* Exception mask (from exc_sum). */
        unsigned long   pal_base;         /* Base address for PALcode.      */
        unsigned long   isr;              /* Interrupt Status Reg.          */
        unsigned long   icsr;             /* CURRENT SETUP OF EV5 IBOX      */
        unsigned long   ic_perr_stat;     /* I-CACHE Reg. <11> set Data parity
                                                         <12> set TAG parity*/
        unsigned long   dc_perr_stat;     /* D-CACHE error Reg. Bits set to 1:
                                                     <2> Data error in bank 0
                                                     <3> Data error in bank 1
                                                     <4> Tag error in bank 0
                                                     <5> Tag error in bank 1 */
        unsigned long   va;               /* Effective VA of fault or miss. */
        unsigned long   mm_stat;          /* Holds the reason for D-stream
                                             fault or D-cache parity errors */
        unsigned long   sc_addr;          /* Address that was being accessed
                                             when EV5 detected Secondary cache
                                             failure.                 */
        unsigned long   sc_stat;          /* Helps determine if the error was
                                             TAG/Data parity(Secondary Cache)*/
        unsigned long   bc_tag_addr;      /* Contents of EV5 BC_TAG_ADDR    */
        unsigned long   ei_addr;          /* Physical address of any transfer
                                             that is logged in EV5 EI_STAT */
        unsigned long   fill_syndrome;    /* For correcting ECC errors.     */
        unsigned long   ei_stat;          /* Helps identify the reason for any
                                             uncorrectable processor error
                                             at its external interface.     */
        unsigned long   ld_lock;          /* Contents of EV5 LD_LOCK register*/
};

struct el_common_EV6_mcheck {
        unsigned int FrameSize;         /* Bytes, including this field */
        unsigned int FrameFlags;        /* <31> = Retry, <30> = Second Error */
        unsigned int CpuOffset;         /* Offset to CPU-specific info */
        unsigned int SystemOffset;      /* Offset to system-specific info */
        unsigned int MCHK_Code;
        unsigned int MCHK_Frame_Rev;
        unsigned long I_STAT;           /* EV6 Internal Processor Registers */
        unsigned long DC_STAT;          /* (See the 21264 Spec) */
        unsigned long C_ADDR;
        unsigned long DC1_SYNDROME;
        unsigned long DC0_SYNDROME;
        unsigned long C_STAT;
        unsigned long C_STS;
        unsigned long MM_STAT;
        unsigned long EXC_ADDR;
        unsigned long IER_CM;
        unsigned long ISUM;
        unsigned long RESERVED0;
        unsigned long PAL_BASE;
        unsigned long I_CTL;
        unsigned long PCTX;
};

extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))

#define switch_to(P,N,L)                                                 \
  do {                                                                   \
    (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
    check_mmu_context();                                                 \
  } while (0)

struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);

#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")

enum implver_enum {
        IMPLVER_EV4,
        IMPLVER_EV5,
        IMPLVER_EV6
};

#ifdef CONFIG_ALPHA_GENERIC
#define implver()                               \
({ unsigned long __implver;                     \
   __asm__ ("implver %0" : "=r"(__implver));    \
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code.  */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif
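
/*
 * Illustrative sketch (not part of this header): on generic kernels
 * implver() executes the IMPLVER instruction at runtime, so callers
 * can select a per-family code path; ev6_fixup() is hypothetical.
 *
 *	if (implver() == IMPLVER_EV6)
 *		ev6_fixup();
 */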

enum amask_enum {
        AMASK_BWX = (1UL << 0),
        AMASK_FIX = (1UL << 1),
        AMASK_CIX = (1UL << 2),
        AMASK_MAX = (1UL << 8),
        AMASK_PRECISE_TRAP = (1UL << 9),
};

#define amask(mask)                                             \
({ unsigned long __amask, __input = (mask);                     \
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));     \
   __amask; })
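
/*
 * AMASK clears each input bit whose extension is implemented, so a
 * zero result means the feature is present.  A minimal sketch
 * (illustrative only):
 *
 *	int have_bwx = (amask(AMASK_BWX) == 0);
 */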

#define __CALL_PAL_R0(NAME, TYPE)                               \
static inline TYPE NAME(void)                                   \
{                                                               \
        register TYPE __r0 __asm__("$0");                       \
        __asm__ __volatile__(                                   \
                "call_pal %1 # " #NAME                          \
                :"=r" (__r0)                                    \
                :"i" (PAL_ ## NAME)                             \
                :"$1", "$16", "$22", "$23", "$24", "$25");      \
        return __r0;                                            \
}

#define __CALL_PAL_W1(NAME, TYPE0)                              \
static inline void NAME(TYPE0 arg0)                             \
{                                                               \
        register TYPE0 __r16 __asm__("$16") = arg0;             \
        __asm__ __volatile__(                                   \
                "call_pal %1 # "#NAME                           \
                : "=r"(__r16)                                   \
                : "i"(PAL_ ## NAME), "0"(__r16)                 \
                : "$1", "$22", "$23", "$24", "$25");            \
}

#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)                       \
static inline void NAME(TYPE0 arg0, TYPE1 arg1)                 \
{                                                               \
        register TYPE0 __r16 __asm__("$16") = arg0;             \
        register TYPE1 __r17 __asm__("$17") = arg1;             \
        __asm__ __volatile__(                                   \
                "call_pal %2 # "#NAME                           \
                : "=r"(__r16), "=r"(__r17)                      \
                : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)     \
                : "$1", "$22", "$23", "$24", "$25");            \
}

#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)                      \
static inline RTYPE NAME(TYPE0 arg0)                            \
{                                                               \
        register RTYPE __r0 __asm__("$0");                      \
        register TYPE0 __r16 __asm__("$16") = arg0;             \
        __asm__ __volatile__(                                   \
                "call_pal %2 # "#NAME                           \
                : "=r"(__r16), "=r"(__r0)                       \
                : "i"(PAL_ ## NAME), "0"(__r16)                 \
                : "$1", "$22", "$23", "$24", "$25");            \
        return __r0;                                            \
}

#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)               \
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)                \
{                                                               \
        register RTYPE __r0 __asm__("$0");                      \
        register TYPE0 __r16 __asm__("$16") = arg0;             \
        register TYPE1 __r17 __asm__("$17") = arg1;             \
        __asm__ __volatile__(                                   \
                "call_pal %3 # "#NAME                           \
                : "=r"(__r16), "=r"(__r17), "=r"(__r0)          \
                : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)     \
                : "$1", "$22", "$23", "$24", "$25");            \
        return __r0;                                            \
}
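
/*
 * Illustrative expansion (not part of this header): the instantiation
 * __CALL_PAL_R0(rdps, unsigned long) below defines, roughly,
 *
 *	static inline unsigned long rdps(void)
 *	{
 *		register unsigned long __r0 __asm__("$0");
 *		__asm__ __volatile__("call_pal %1 # rdps"
 *			: "=r" (__r0) : "i" (PAL_rdps)
 *			: "$1", "$16", "$22", "$23", "$24", "$25");
 *		return __r0;
 *	}
 */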

__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);

#define IPL_MIN         0
#define IPL_SW0         1
#define IPL_SW1         2
#define IPL_DEV0        3
#define IPL_DEV1        4
#define IPL_TIMER       5
#define IPL_PERF        6
#define IPL_POWERFAIL   6
#define IPL_MCHECK      7
#define IPL_MAX         7

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN         __min_ipl
extern int __min_ipl;
#endif

#define getipl()                (rdps() & 7)
#define setipl(ipl)             ((void) swpipl(ipl))

#define local_irq_disable()                     do { setipl(IPL_MAX); barrier(); } while(0)
#define local_irq_enable()                      do { barrier(); setipl(IPL_MIN); } while(0)
#define local_save_flags(flags) ((flags) = rdps())
#define local_irq_save(flags)   do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
#define local_irq_restore(flags)        do { barrier(); setipl(flags); barrier(); } while(0)

#define irqs_disabled() (getipl() == IPL_MAX)
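
/*
 * Usage sketch (illustrative only): interrupts are masked by raising
 * the IPL to IPL_MAX, and local_irq_restore() returns to whatever
 * level was in force before.
 *
 *	unsigned long flags;
 *	local_irq_save(flags);
 *	...code that must not be interrupted...
 *	local_irq_restore(flags);
 */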

/*
 * TB routines..
 */
#define __tbi(nr,arg,arg1...)                                   \
({                                                              \
        register unsigned long __r16 __asm__("$16") = (nr);     \
        register unsigned long __r17 __asm__("$17"); arg;       \
        __asm__ __volatile__(                                   \
                "call_pal %3 #__tbi"                            \
                :"=r" (__r16),"=r" (__r17)                      \
                :"0" (__r16),"i" (PAL_tbi) ,##arg1              \
                :"$0", "$1", "$22", "$23", "$24", "$25");       \
})

#define tbi(x,y)        __tbi(x,__r17=(y),"1" (__r17))
#define tbisi(x)        __tbi(1,__r17=(x),"1" (__r17))
#define tbisd(x)        __tbi(2,__r17=(x),"1" (__r17))
#define tbis(x)         __tbi(3,__r17=(x),"1" (__r17))
#define tbiap()         __tbi(-1, /* no second argument */)
#define tbia()          __tbi(-2, /* no second argument */)
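
/*
 * Illustrative sketch (not part of this header): tbisi()/tbisd()
 * invalidate the I-stream or D-stream translation for one virtual
 * address, tbis() does both, and tbiap()/tbia() flush process
 * entries or the whole TB.  For example, after changing a PTE:
 *
 *	tbis(addr);	...drop any cached translation for 'addr'...
 */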

/*
 * Atomic exchange.
 * Since it can be used to implement critical sections,
 * it must clobber "memory" (also for interrupts in UP).
 */

static inline unsigned long
__xchg_u8(volatile char *m, unsigned long val)
{
        unsigned long ret, tmp, addr64;

        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       insbl   %1,%4,%1\n"
        "1:     ldq_l   %2,0(%3)\n"
        "       extbl   %2,%4,%0\n"
        "       mskbl   %2,%4,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");

        return ret;
}

static inline unsigned long
__xchg_u16(volatile short *m, unsigned long val)
{
        unsigned long ret, tmp, addr64;

        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       inswl   %1,%4,%1\n"
        "1:     ldq_l   %2,0(%3)\n"
        "       extwl   %2,%4,%0\n"
        "       mskwl   %2,%4,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");

        return ret;
}

static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,2f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");

        return val;
}

static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        "1:     ldq_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,2f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");

        return val;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size) \
({ \
        unsigned long __xchg__res; \
        volatile void *__xchg__ptr = (ptr); \
        switch (size) { \
                case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
                case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
                case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
                case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
                default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
        } \
        __xchg__res; \
})

#define xchg(ptr,x)                                                          \
  ({                                                                         \
     __typeof__(*(ptr)) _x_ = (x);                                           \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })
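
/*
 * Usage sketch (illustrative; 'lock' and its 'locked' field are
 * hypothetical): xchg() atomically stores the new value and returns
 * the previous one, which is enough for a test-and-set spinlock.
 *
 *	while (xchg(&lock->locked, 1) != 0)
 *		continue;	...spin until the old value was 0...
 */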

static inline unsigned long
__xchg_u8_local(volatile char *m, unsigned long val)
{
        unsigned long ret, tmp, addr64;

        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       insbl   %1,%4,%1\n"
        "1:     ldq_l   %2,0(%3)\n"
        "       extbl   %2,%4,%0\n"
        "       mskbl   %2,%4,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");

        return ret;
}

static inline unsigned long
__xchg_u16_local(volatile short *m, unsigned long val)
{
        unsigned long ret, tmp, addr64;

        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       inswl   %1,%4,%1\n"
        "1:     ldq_l   %2,0(%3)\n"
        "       extwl   %2,%4,%0\n"
        "       mskwl   %2,%4,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");

        return ret;
}

static inline unsigned long
__xchg_u32_local(volatile int *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");

        return val;
}

static inline unsigned long
__xchg_u64_local(volatile long *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        "1:     ldq_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");

        return val;
}

#define __xchg_local(ptr, x, size) \
({ \
        unsigned long __xchg__res; \
        volatile void *__xchg__ptr = (ptr); \
        switch (size) { \
                case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \
                case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \
                case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \
                case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \
                default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
        } \
        __xchg__res; \
})

#define xchg_local(ptr,x)                                                    \
  ({                                                                         \
     __typeof__(*(ptr)) _x_ = (x);                                           \
     (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,            \
                sizeof(*(ptr))); \
  })

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change.  If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far as I can tell.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u8(volatile char *m, long old, long new)
{
        unsigned long prev, tmp, cmp, addr64;

        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       insbl   %1,%5,%1\n"
        "1:     ldq_l   %2,0(%4)\n"
        "       extbl   %2,%5,%0\n"
        "       cmpeq   %0,%6,%3\n"
        "       beq     %3,2f\n"
        "       mskbl   %2,%5,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u16(volatile short *m, long old, long new)
{
        unsigned long prev, tmp, cmp, addr64;

        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       inswl   %1,%5,%1\n"
        "1:     ldq_l   %2,0(%4)\n"
        "       extwl   %2,%5,%0\n"
        "       cmpeq   %0,%6,%3\n"
        "       beq     %3,2f\n"
        "       mskwl   %2,%5,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
        unsigned long prev, cmp;

        __asm__ __volatile__(
        "1:     ldl_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
        "       beq %1,2f\n"
        "       mov %4,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
        unsigned long prev, cmp;

        __asm__ __volatile__(
        "1:     ldq_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
        "       beq %1,2f\n"
        "       mov %4,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");

        return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
                case 1:
                        return __cmpxchg_u8(ptr, old, new);
                case 2:
                        return __cmpxchg_u16(ptr, old, new);
                case 4:
                        return __cmpxchg_u32(ptr, old, new);
                case 8:
                        return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                 \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,           \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })
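
/*
 * Usage sketch (illustrative; 'counter' is a hypothetical variable):
 * the classic cmpxchg retry loop; the store only takes effect if
 * nobody updated the location between our read and the exchange.
 *
 *	long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */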

static inline unsigned long
__cmpxchg_u8_local(volatile char *m, long old, long new)
{
        unsigned long prev, tmp, cmp, addr64;

        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       insbl   %1,%5,%1\n"
        "1:     ldq_l   %2,0(%4)\n"
        "       extbl   %2,%5,%0\n"
        "       cmpeq   %0,%6,%3\n"
        "       beq     %3,2f\n"
        "       mskbl   %2,%5,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u16_local(volatile short *m, long old, long new)
{
        unsigned long prev, tmp, cmp, addr64;

        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       inswl   %1,%5,%1\n"
        "1:     ldq_l   %2,0(%4)\n"
        "       extwl   %2,%5,%0\n"
        "       cmpeq   %0,%6,%3\n"
        "       beq     %3,2f\n"
        "       mskwl   %2,%5,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u32_local(volatile int *m, int old, int new)
{
        unsigned long prev, cmp;

        __asm__ __volatile__(
        "1:     ldl_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
        "       beq %1,2f\n"
        "       mov %4,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new)
{
        unsigned long prev, cmp;

        __asm__ __volatile__(
        "1:     ldq_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
        "       beq %1,2f\n"
        "       mov %4,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
                int size)
{
        switch (size) {
                case 1:
                        return __cmpxchg_u8_local(ptr, old, new);
                case 2:
                        return __cmpxchg_u16_local(ptr, old, new);
                case 4:
                        return __cmpxchg_u32_local(ptr, old, new);
                case 8:
                        return __cmpxchg_u64_local(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg_local(ptr,o,n)                                           \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,     \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif