linux/arch/ia64/include/asm/sn/pda.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
 */
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H

#include <linux/cache.h>
#include <asm/percpu.h>
#include <asm/system.h>


/*
 * CPU-specific data structure.
 *
 * One of these structures is allocated for each cpu of a NUMA system.
 *
 * This structure provides a convenient way of keeping together
 * all SN per-cpu data structures.
 */

typedef struct pda_s {

	/*
	 * Support for SN LEDs
	 */
	volatile short	*led_address;
	u8		led_state;
	u8		hb_state;	/* supports blinking heartbeat leds */
	unsigned int	hb_count;

	unsigned int	idle_flag;

	volatile unsigned long *bedrock_rev_id;
	volatile unsigned long *pio_write_status_addr;
	unsigned long pio_write_status_val;
	volatile unsigned long *pio_shub_war_cam_addr;

	unsigned long	sn_in_service_ivecs[4];
	int		sn_lb_int_war_ticks;
	int		sn_last_irq;
	int		sn_first_irq;
} pda_t;


#define CACHE_ALIGN(x)	(((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
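/*
 * Worked example (illustrative only, assuming SMP_CACHE_BYTES == 128):
 * CACHE_ALIGN(200) == (200 + 127) & ~127 == 327 & ~127 == 256,
 * i.e. the argument is rounded up to the next cache-line boundary.
 */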

/*
 * PDA
 * Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
 * cpu, but only a small amount of the page is actually used. We put the SNIA PDA
 * in the same page as the cpu_data area. Note that there is a check in the setup
 * code to verify that we don't overflow the page.
 *
 * It seems like we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change cache layout. Should we align to a 32,
 * 64, 128, or 512 byte boundary? Each has merits. For now, pick 128, but this
 * should be revisited later.
 */
DECLARE_PER_CPU(struct pda_s, pda_percpu);

#define pda		(&__ia64_per_cpu_var(pda_percpu))

#define pdacpu(cpu)	(&per_cpu(pda_percpu, cpu))
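/*
 * Minimal usage sketch (illustrative only, not part of this header):
 * pda resolves to the current cpu's pda_t, while pdacpu(cpu) reaches the
 * copy belonging to a specific cpu, e.g.
 *
 *	pda->led_state = 0;		for the executing cpu
 *	pdacpu(cpu)->sn_last_irq = irq;	for the cpu given by 'cpu'
 */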

#endif /* _ASM_IA64_SN_PDA_H */