linux/arch/ia64/include/asm/processor.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98     S.Eranian       added ia64_set_iva()
 * 12/03/99     D. Mosberger    implement thread_saved_pc() via kernel unwind API
 * 06/16/00     A. Mallick      added csd/ssd/tssd for ia32 support
 */


#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_PHYS_STACK_REG 96
#define IA64_NUM_DBG_REGS       8

#define DEFAULT_MAP_BASE        __IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE       __IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE is misnamed: it is really the maximum user space address
 * (plus one).  On IA-64, there are five user regions of 2TB each
 * (assuming an 8KB page size), for a total of 10TB of user virtual
 * address space.
 */
#define TASK_SIZE               DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (current->thread.map_base)

#define IA64_THREAD_FPH_VALID   (__IA64_UL(1) << 0)     /* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID   (__IA64_UL(1) << 1)     /* debug registers valid? */
#define IA64_THREAD_PM_VALID    (__IA64_UL(1) << 2)     /* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3)     /* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS  (__IA64_UL(1) << 4)     /* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION   (__IA64_UL(1) << 5)     /* require migration
                                                           sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)   /* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)   /* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT   3
#define IA64_THREAD_UAC_MASK    (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT 6
#define IA64_THREAD_FPEMU_MASK  (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)


/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT 30
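
/*
 * Worked example (illustrative, assuming a 400MHz ITC): nsec_per_cyc =
 * (1000000000 << 30) / 400000000 = 2684354560, and a cycle count is
 * converted with ns = (cyc * nsec_per_cyc) >> 30, i.e. 2.5ns/cycle in
 * pure integer arithmetic; 10*1000000000 << 30 still fits in 64 bits.
 */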

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>
#include <linux/bitops.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* the PSR bits from <asm/kregs.h>, expressed as bitfields for more efficient access: */
struct ia64_psr {
        __u64 reserved0 : 1;
        __u64 be : 1;
        __u64 up : 1;
        __u64 ac : 1;
        __u64 mfl : 1;
        __u64 mfh : 1;
        __u64 reserved1 : 7;
        __u64 ic : 1;
        __u64 i : 1;
        __u64 pk : 1;
        __u64 reserved2 : 1;
        __u64 dt : 1;
        __u64 dfl : 1;
        __u64 dfh : 1;
        __u64 sp : 1;
        __u64 pp : 1;
        __u64 di : 1;
        __u64 si : 1;
        __u64 db : 1;
        __u64 lp : 1;
        __u64 tb : 1;
        __u64 rt : 1;
        __u64 reserved3 : 4;
        __u64 cpl : 2;
        __u64 is : 1;
        __u64 mc : 1;
        __u64 it : 1;
        __u64 id : 1;
        __u64 da : 1;
        __u64 dd : 1;
        __u64 ss : 1;
        __u64 ri : 2;
        __u64 ed : 1;
        __u64 bn : 1;
        __u64 reserved4 : 19;
};
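
/*
 * Illustrative use: a saved psr image such as pt_regs->cr_ipsr is viewed
 * through this layout via the ia64_psr() helper from <asm/ptrace.h>
 * (KSTK_EIP() below does exactly that to read psr.ri), e.g.:
 *
 *      struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 *      if (psr->cpl == 3)
 *              ...interrupted context was running at user level...
 */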

union ia64_isr {
        __u64  val;
        struct {
                __u64 code : 16;
                __u64 vector : 8;
                __u64 reserved1 : 8;
                __u64 x : 1;
                __u64 w : 1;
                __u64 r : 1;
                __u64 na : 1;
                __u64 sp : 1;
                __u64 rs : 1;
                __u64 ir : 1;
                __u64 ni : 1;
                __u64 so : 1;
                __u64 ei : 2;
                __u64 ed : 1;
                __u64 reserved2 : 20;
        };
};

union ia64_lid {
        __u64 val;
        struct {
                __u64  rv  : 16;
                __u64  eid : 8;
                __u64  id  : 8;
                __u64  ig  : 32;
        };
};

union ia64_tpr {
        __u64 val;
        struct {
                __u64 ig0 : 4;
                __u64 mic : 4;
                __u64 rsv : 8;
                __u64 mmi : 1;
                __u64 ig1 : 47;
        };
};

union ia64_itir {
        __u64 val;
        struct {
                __u64 rv3  :  2; /* 0-1 */
                __u64 ps   :  6; /* 2-7 */
                __u64 key  : 24; /* 8-31 */
                __u64 rv4  : 32; /* 32-63 */
        };
};

union  ia64_rr {
        __u64 val;
        struct {
                __u64  ve       :  1;  /* enable hw walker */
                __u64  reserved0:  1;  /* reserved */
                __u64  ps       :  6;  /* log page size */
                __u64  rid      : 24;  /* region id */
                __u64  reserved1: 32;  /* reserved */
        };
};

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
        unsigned int softirq_pending;
        unsigned long itm_delta;        /* # of clock cycles between clock ticks */
        unsigned long itm_next;         /* interval timer mask value to use for next clock tick */
        unsigned long nsec_per_cyc;     /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
        unsigned long unimpl_va_mask;   /* mask of unimplemented virtual address bits (from PAL) */
        unsigned long unimpl_pa_mask;   /* mask of unimplemented physical address bits (from PAL) */
        unsigned long itc_freq;         /* frequency of ITC counter */
        unsigned long proc_freq;        /* frequency of processor */
        unsigned long cyc_per_usec;     /* itc_freq/1000000 */
        unsigned long ptce_base;
        unsigned int ptce_count[2];
        unsigned int ptce_stride[2];
        struct task_struct *ksoftirqd;  /* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
        unsigned long loops_per_jiffy;
        int cpu;
        unsigned int socket_id; /* physical processor socket id */
        unsigned short core_id; /* core id */
        unsigned short thread_id; /* thread id */
        unsigned short num_log; /* Total number of logical processors on
                                 * this socket that were successfully booted */
        unsigned char cores_per_socket; /* Cores per processor socket */
        unsigned char threads_per_core; /* Threads per core */
#endif

        /* CPUID-derived information: */
        unsigned long ppn;
        unsigned long features;
        unsigned char number;
        unsigned char revision;
        unsigned char model;
        unsigned char family;
        unsigned char archrev;
        char vendor[16];
        char *model_name;

#ifdef CONFIG_NUMA
        struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data          (&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)           (&per_cpu(ia64_cpu_info, cpu))
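
/*
 * Usage sketch (illustrative): read a field for this CPU vs. a remote
 * CPU; only the cpu_data(cpu) form is valid for other CPUs:
 *
 *      unsigned long here  = local_cpu_data->itc_freq;
 *      unsigned long there = cpu_data(other_cpu)->itc_freq;
 */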

extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
        unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)                                                             \
({                                                                                              \
        (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)                  \
                                | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
        0;                                                                                      \
})
#define GET_UNALIGN_CTL(task,addr)                                                              \
({                                                                                              \
        put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,        \
                 (int __user *) (addr));                                                        \
})

#define SET_FPEMU_CTL(task,value)                                                               \
({                                                                                              \
        (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)                \
                          | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));   \
        0;                                                                                      \
})
#define GET_FPEMU_CTL(task,addr)                                                                \
({                                                                                              \
        put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,    \
                 (int __user *) (addr));                                                        \
})
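
/*
 * These macros back the generic prctl() interface, e.g. (illustrative,
 * from userspace):
 *
 *      prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);  ->  SET_UNALIGN_CTL()
 *      prctl(PR_GET_FPEMU, &value);               ->  GET_FPEMU_CTL()
 *
 * which is why the SET variants evaluate to 0 (success) and the GET
 * variants evaluate to the put_user() result.
 */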

struct thread_struct {
        __u32 flags;                    /* various thread flags (see IA64_THREAD_*) */
        /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
        __u8 on_ustack;                 /* executing on user-stacks? */
        __u8 pad[3];
        __u64 ksp;                      /* kernel stack pointer */
        __u64 map_base;                 /* base address for get_unmapped_area() */
        __u64 rbs_bot;                  /* the base address for the RBS */
        int last_fph_cpu;               /* CPU that may hold the contents of f32-f127 */
        unsigned long dbr[IA64_NUM_DBG_REGS];
        unsigned long ibr[IA64_NUM_DBG_REGS];
        struct ia64_fpreg fph[96];      /* saved/loaded on demand */
};

#define INIT_THREAD {                                           \
        .flags =        0,                                      \
        .on_ustack =    0,                                      \
        .ksp =          0,                                      \
        .map_base =     DEFAULT_MAP_BASE,                       \
        .rbs_bot =      STACK_TOP - DEFAULT_USER_STACK_SIZE,    \
        .last_fph_cpu =  -1,                                    \
        .dbr =          {0, },                                  \
        .ibr =          {0, },                                  \
        .fph =          {{{{0}}}, }                             \
}

#define start_thread(regs,new_ip,new_sp) do {                                                   \
        regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))                \
                         & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));              \
        regs->cr_iip = new_ip;                                                                  \
        regs->ar_rsc = 0xf;             /* eager mode, privilege level 3 */                     \
        regs->ar_rnat = 0;                                                                      \
        regs->ar_bspstore = current->thread.rbs_bot;                                            \
        regs->ar_fpsr = FPSR_DEFAULT;                                                           \
        regs->loadrs = 0;                                                                       \
        regs->r8 = get_dumpable(current->mm);   /* set "don't zap registers" flag */            \
        regs->r12 = new_sp - 16;        /* allocate 16 byte scratch area */                     \
        if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {                            \
                /*                                                                              \
                 * Zap scratch regs to avoid leaking bits between processes with different      \
                 * uid/privileges.                                                              \
                 */                                                                             \
                regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;                                   \
                regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;       \
        }                                                                                       \
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)                                   \
  ({                                                    \
        struct pt_regs *_regs = task_pt_regs(tsk);      \
        _regs->cr_iip + ia64_psr(_regs)->ri;            \
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)                                     \
({                                                              \
        unsigned long r = 0;                                    \
                                                                \
        switch (regnum) {                                       \
            case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;   \
            case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;   \
            case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;   \
            case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;   \
            case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;   \
            case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;   \
            case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;   \
            case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;   \
            default: ia64_getreg_unknown_kr(); break;           \
        }                                                       \
        r;                                                      \
})

#define ia64_set_kr(regnum, r)                                  \
({                                                              \
        switch (regnum) {                                       \
            case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;    \
            case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;    \
            case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;    \
            case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;    \
            case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;    \
            case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;    \
            case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;    \
            case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;    \
            default: ia64_setreg_unknown_kr(); break;           \
        }                                                       \
})
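
/*
 * The switches above exist because the ia64_getreg()/ia64_setreg()
 * intrinsics require a compile-time constant register operand; a regnum
 * outside 0-7 falls through to the undefined ia64_*reg_unknown_kr()
 * externs and is caught as a link-time error.  Typical use
 * (illustrative):
 *
 *      ia64_set_kr(IA64_KR_CURRENT, (unsigned long) task);
 *      task = (struct task_struct *) ia64_get_kr(IA64_KR_CURRENT);
 */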

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)                                                              \
({                                                                                              \
        struct task_struct *__ia64_islfo_task = (t);                                            \
        (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()                           \
         && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));        \
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {                                                \
        struct task_struct *__ia64_slfo_task = (t);                                     \
        __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();                     \
        ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);               \
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)        ((t)->thread.last_fph_cpu = -1)
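
/*
 * Lazy fph sketch (illustrative, simplified from the real fault path):
 * f32-f127 are only reloaded when a task that does not own the local fph
 * partition touches them:
 *
 *      if (!ia64_is_local_fpu_owner(task)) {
 *              if (task->thread.flags & IA64_THREAD_FPH_VALID)
 *                      ia64_load_fpu(task->thread.fph);
 *              ia64_set_local_fpu_owner(task);
 *      }
 */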

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#define ia64_fph_enable()       do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()      do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
        ia64_fph_enable();
        __ia64_init_fpu();
        ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
        ia64_fph_enable();
        __ia64_save_fpu(fph);
        ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
        ia64_fph_enable();
        __ia64_load_fpu(fph);
        ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
        __u64 psr;
        psr = ia64_getreg(_IA64_REG_PSR);
        ia64_stop();
        ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
        ia64_srlz_i();
        return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
        ia64_stop();
        ia64_setreg(_IA64_REG_PSR_L, psr);
        ia64_srlz_i();
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
          __u64 vmaddr, __u64 pte,
          __u64 log_page_size)
{
        ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
        ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
        ia64_stop();
        if (target_mask & 0x1)
                ia64_itri(tr_num, pte);
        if (target_mask & 0x2)
                ia64_itrd(tr_num, pte);
}
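
/*
 * Pinning sketch (illustrative, modeled on the MMU init code): a
 * translation register is inserted with interruption collection off and
 * psr restored afterwards; target_mask bit 0 selects the instruction
 * TRs, bit 1 the data TRs:
 *
 *      psr = ia64_clear_ic();
 *      ia64_itr(0x2, tr_num, vmaddr, pte, log_page_size);
 *      ia64_set_psr(psr);
 *      ia64_srlz_i();
 */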

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
          __u64 log_page_size)
{
        ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
        ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
        ia64_stop();
        /* as per EAS2.6, itc must be the last instruction in an instruction group */
        if (target_mask & 0x1)
                ia64_itci(pte);
        if (target_mask & 0x2)
                ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
        if (target_mask & 0x1)
                ia64_ptri(vmaddr, (log_size << 2));
        if (target_mask & 0x2)
                ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
        ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
        ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
        /* Note: srlz.i implies srlz.d */
        ia64_setreg(_IA64_REG_CR_PTA, pta);
        ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
        ia64_setreg(_IA64_REG_CR_EOI, 0);
        ia64_srlz_d();
}

#define cpu_relax()     ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
        unsigned int reg = vector / 64;
        unsigned int bit = vector % 64;
        u64 irr;

        switch (reg) {
        case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
        case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
        case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
        case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
        }

        return test_bit(bit, &irr);
}
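
/*
 * Note: interrupt vectors are 8 bits (0-255), so reg is always 0-3 and
 * irr is always assigned by the switch above.  E.g. ia64_get_irr(v)
 * returns nonzero if vector v is pending in cr.irr0-3.
 */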

static inline void
ia64_set_lrr0 (unsigned long val)
{
        ia64_setreg(_IA64_REG_CR_LRR0, val);
        ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
        ia64_setreg(_IA64_REG_CR_LRR1, val);
        ia64_srlz_d();
}


/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
        return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
        __u64 bit = ia64_unat_pos(spill_addr);
        __u64 mask = 1UL << bit;

        *unat = (*unat & ~mask) | (nat << bit);
}
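
/*
 * Worked example (illustrative): a spill to address 0x...1e8 maps to
 * unat bit (0x1e8 >> 3) & 0x3f = 0x3d, i.e. bit 61 of the collection.
 */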

static inline __u64
ia64_get_ivr (void)
{
        __u64 r;
        ia64_srlz_d();
        r = ia64_getreg(_IA64_REG_CR_IVR);
        ia64_srlz_d();
        return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
        __ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
        ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
        __u64 retval;

        retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
        ia64_srlz_d();
#endif
        return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
        return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)  ia64_rotr((w), (64) - (n))
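
/*
 * E.g. ia64_rotr(0xffUL, 8) == 0xff00000000000000; ia64_rotl(w, n)
 * rotates left by rotating right 64-n bits.
 */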

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
        void *result;
        result = (void *) ia64_tpa(addr);
        return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE                 L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
        ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
        ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)   prefetchw(x)

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
                         IDLE_NOMWAIT, IDLE_POLL};

void default_idle(void);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */