linux/arch/powerpc/include/asm/spu.h
/*
 * SPU core / file system interface and HW structures
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _SPU_H
#define _SPU_H
#ifdef __KERNEL__

#include <linux/workqueue.h>
#include <linux/sysdev.h>

#define LS_SIZE (256 * 1024)
#define LS_ADDR_MASK (LS_SIZE - 1)

#define MFC_PUT_CMD             0x20
#define MFC_PUTS_CMD            0x28
#define MFC_PUTR_CMD            0x30
#define MFC_PUTF_CMD            0x22
#define MFC_PUTB_CMD            0x21
#define MFC_PUTFS_CMD           0x2A
#define MFC_PUTBS_CMD           0x29
#define MFC_PUTRF_CMD           0x32
#define MFC_PUTRB_CMD           0x31
#define MFC_PUTL_CMD            0x24
#define MFC_PUTRL_CMD           0x34
#define MFC_PUTLF_CMD           0x26
#define MFC_PUTLB_CMD           0x25
#define MFC_PUTRLF_CMD          0x36
#define MFC_PUTRLB_CMD          0x35

#define MFC_GET_CMD             0x40
#define MFC_GETS_CMD            0x48
#define MFC_GETF_CMD            0x42
#define MFC_GETB_CMD            0x41
#define MFC_GETFS_CMD           0x4A
#define MFC_GETBS_CMD           0x49
#define MFC_GETL_CMD            0x44
#define MFC_GETLF_CMD           0x46
#define MFC_GETLB_CMD           0x45

#define MFC_SDCRT_CMD           0x80
#define MFC_SDCRTST_CMD         0x81
#define MFC_SDCRZ_CMD           0x89
#define MFC_SDCRS_CMD           0x8D
#define MFC_SDCRF_CMD           0x8F

#define MFC_GETLLAR_CMD         0xD0
#define MFC_PUTLLC_CMD          0xB4
#define MFC_PUTLLUC_CMD         0xB0
#define MFC_PUTQLLUC_CMD        0xB8
#define MFC_SNDSIG_CMD          0xA0
#define MFC_SNDSIGB_CMD         0xA1
#define MFC_SNDSIGF_CMD         0xA2
#define MFC_BARRIER_CMD         0xC0
#define MFC_EIEIO_CMD           0xC8
#define MFC_SYNC_CMD            0xCC

#define MFC_MIN_DMA_SIZE_SHIFT  4       /* 16 bytes */
#define MFC_MAX_DMA_SIZE_SHIFT  14      /* 16384 bytes */
#define MFC_MIN_DMA_SIZE        (1 << MFC_MIN_DMA_SIZE_SHIFT)
#define MFC_MAX_DMA_SIZE        (1 << MFC_MAX_DMA_SIZE_SHIFT)
#define MFC_MIN_DMA_SIZE_MASK   (MFC_MIN_DMA_SIZE - 1)
#define MFC_MAX_DMA_SIZE_MASK   (MFC_MAX_DMA_SIZE - 1)
#define MFC_MIN_DMA_LIST_SIZE   0x0008  /*   8 bytes */
#define MFC_MAX_DMA_LIST_SIZE   0x4000  /* 16K bytes */

#define MFC_TAGID_TO_TAGMASK(tag_id)  (1 << (tag_id & 0x1F))
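
/*
 * Example (illustrative sketch, not part of the original interface):
 * MFC_TAGID_TO_TAGMASK(5) yields 0x20 (1 << 5), the bit to set in a tag
 * query mask for tag group 5.  A simple check of a transfer size against
 * the DMA limits above might look like this ("size" is a hypothetical
 * variable):
 *
 *	if ((size & MFC_MIN_DMA_SIZE_MASK) || size > MFC_MAX_DMA_SIZE)
 *		return -EINVAL;
 */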

/* Events for Channels 0-2 */
#define MFC_DMA_TAG_STATUS_UPDATE_EVENT     0x00000001
#define MFC_DMA_TAG_CMD_STALL_NOTIFY_EVENT  0x00000002
#define MFC_DMA_QUEUE_AVAILABLE_EVENT       0x00000008
#define MFC_SPU_MAILBOX_WRITTEN_EVENT       0x00000010
#define MFC_DECREMENTER_EVENT               0x00000020
#define MFC_PU_INT_MAILBOX_AVAILABLE_EVENT  0x00000040
#define MFC_PU_MAILBOX_AVAILABLE_EVENT      0x00000080
#define MFC_SIGNAL_2_EVENT                  0x00000100
#define MFC_SIGNAL_1_EVENT                  0x00000200
#define MFC_LLR_LOST_EVENT                  0x00000400
#define MFC_PRIV_ATTN_EVENT                 0x00000800
#define MFC_MULTI_SRC_EVENT                 0x00001000

/* Flags indicating progress during context switch. */
#define SPU_CONTEXT_SWITCH_PENDING      0UL
#define SPU_CONTEXT_FAULT_PENDING       1UL

struct spu_context;
struct spu_runqueue;
struct spu_lscsa;
struct device_node;

enum spu_utilization_state {
        SPU_UTIL_USER,
        SPU_UTIL_SYSTEM,
        SPU_UTIL_IOWAIT,
        SPU_UTIL_IDLE_LOADED,
        SPU_UTIL_MAX
};

struct spu {
        const char *name;
        unsigned long local_store_phys;
        u8 *local_store;
        unsigned long problem_phys;
        struct spu_problem __iomem *problem;
        struct spu_priv2 __iomem *priv2;
        struct list_head cbe_list;
        struct list_head full_list;
        enum { SPU_FREE, SPU_USED } alloc_state;
        int number;
        unsigned int irqs[3];
        u32 node;
        unsigned long flags;
        u64 class_0_pending;
        u64 class_0_dar;
        u64 class_1_dar;
        u64 class_1_dsisr;
        size_t ls_size;
        unsigned int slb_replace;
        struct mm_struct *mm;
        struct spu_context *ctx;
        struct spu_runqueue *rq;
        unsigned long long timestamp;
        pid_t pid;
        pid_t tgid;
        spinlock_t register_lock;

        void (* wbox_callback)(struct spu *spu);
        void (* ibox_callback)(struct spu *spu);
        void (* stop_callback)(struct spu *spu, int irq);
        void (* mfc_callback)(struct spu *spu);

        char irq_c0[8];
        char irq_c1[8];
        char irq_c2[8];

        u64 spe_id;

        void* pdata; /* platform private data */

        /* OF-based platforms only */
        struct device_node *devnode;

        /* native only */
        struct spu_priv1 __iomem *priv1;

        /* beat only */
        u64 shadow_int_mask_RW[3];

        struct sys_device sysdev;

        int has_mem_affinity;
        struct list_head aff_list;

        struct {
                /* protected by interrupt reentrancy */
                enum spu_utilization_state util_state;
                unsigned long long tstamp;
                unsigned long long times[SPU_UTIL_MAX];
                unsigned long long vol_ctx_switch;
                unsigned long long invol_ctx_switch;
                unsigned long long min_flt;
                unsigned long long maj_flt;
                unsigned long long hash_flt;
                unsigned long long slb_flt;
                unsigned long long class2_intr;
                unsigned long long libassist;
        } stats;
};

struct cbe_spu_info {
        struct mutex list_mutex;
        struct list_head spus;
        int n_spus;
        int nr_active;
        atomic_t busy_spus;
        atomic_t reserved_spus;
};

extern struct cbe_spu_info cbe_spu_info[];
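
/*
 * Example (illustrative sketch, not part of the original interface):
 * walking the per-node SPU list.  This assumes, as the naming suggests,
 * that list_mutex protects the spus list and that each struct spu is
 * linked in through its cbe_list member; "node" is a hypothetical index.
 *
 *	struct spu *spu;
 *
 *	mutex_lock(&cbe_spu_info[node].list_mutex);
 *	list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
 *		pr_debug("spu %d on node %u\n", spu->number, spu->node);
 *	mutex_unlock(&cbe_spu_info[node].list_mutex);
 */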

void spu_init_channels(struct spu *spu);
void spu_irq_setaffinity(struct spu *spu, int cpu);

void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                void *code, int code_size);

#ifdef CONFIG_KEXEC
void crash_register_spus(struct list_head *list);
#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

extern void spu_invalidate_slbs(struct spu *spu);
extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
int spu_64k_pages_available(void);

/* Calls from the memory management to the SPU */
struct mm_struct;
extern void spu_flush_all_slbs(struct mm_struct *mm);

/* This interface allows a profiler (e.g., OProfile) to store a ref
 * to spu context information that it creates.  This caching technique
 * avoids the need to recreate this information after a save/restore operation.
 *
 * Assumes the caller has already incremented the ref count of
 * profile_info; then spu_context_destroy must call kref_put
 * on prof_info_kref.
 */
void spu_set_profile_private_kref(struct spu_context *ctx,
                                  struct kref *prof_info_kref,
                                  void ( * prof_info_release) (struct kref *kref));

void *spu_get_profile_private_kref(struct spu_context *ctx);
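
/*
 * Example (illustrative sketch, not part of the original interface):
 * a profiler caching per-context data behind a kref.  The structure and
 * release function below are hypothetical; only the two accessors above
 * are part of this header.
 *
 *	struct prof_info {
 *		struct kref kref;
 *		u64 object_id;
 *	};
 *
 *	static void prof_info_release(struct kref *kref)
 *	{
 *		kfree(container_of(kref, struct prof_info, kref));
 *	}
 *
 *	info = kzalloc(sizeof(*info), GFP_KERNEL);
 *	kref_init(&info->kref);
 *	spu_set_profile_private_kref(ctx, &info->kref, prof_info_release);
 */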

/* system callbacks from the SPU */
struct spu_syscall_block {
        u64 nr_ret;
        u64 parm[6];
};
extern long spu_sys_callback(struct spu_syscall_block *s);

/* syscalls implemented in spufs */
struct file;
struct spufs_calls {
        long (*create_thread)(const char __user *name,
                                        unsigned int flags, mode_t mode,
                                        struct file *neighbor);
        long (*spu_run)(struct file *filp, __u32 __user *unpc,
                                                __u32 __user *ustatus);
        int (*coredump_extra_notes_size)(void);
        int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset);
        void (*notify_spus_active)(void);
        struct module *owner;
};

/* return status from spu_run, same as in libspe */
#define SPE_EVENT_DMA_ALIGNMENT         0x0008  /* A DMA alignment error */
#define SPE_EVENT_SPE_ERROR             0x0010  /* An illegal instruction error */
#define SPE_EVENT_SPE_DATA_SEGMENT      0x0020  /* A DMA segmentation error    */
#define SPE_EVENT_SPE_DATA_STORAGE      0x0040  /* A DMA storage error */
#define SPE_EVENT_INVALID_DMA           0x0800  /* Invalid MFC DMA */

/*
 * Flags for sys_spu_create.
 */
#define SPU_CREATE_EVENTS_ENABLED       0x0001
#define SPU_CREATE_GANG                 0x0002
#define SPU_CREATE_NOSCHED              0x0004
#define SPU_CREATE_ISOLATE              0x0008
#define SPU_CREATE_AFFINITY_SPU         0x0010
#define SPU_CREATE_AFFINITY_MEM         0x0020

#define SPU_CREATE_FLAG_ALL             0x003f /* mask of all valid flags */
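
/*
 * Example (illustrative sketch, not part of the original interface):
 * rejecting unknown creation flags with the mask above ("flags" is the
 * value passed to sys_spu_create):
 *
 *	if (flags & ~SPU_CREATE_FLAG_ALL)
 *		return -EINVAL;
 */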


int register_spu_syscalls(struct spufs_calls *calls);
void unregister_spu_syscalls(struct spufs_calls *calls);
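
/*
 * Example (illustrative sketch, not part of the original interface): how
 * a filesystem module such as spufs would typically plug itself in at
 * init time and detach again on exit.  The table and handler names shown
 * here are hypothetical.
 *
 *	static struct spufs_calls my_calls = {
 *		.create_thread	= my_spu_create,
 *		.spu_run	= my_spu_run,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ret = register_spu_syscalls(&my_calls);
 *	...
 *	unregister_spu_syscalls(&my_calls);
 */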

int spu_add_sysdev_attr(struct sysdev_attribute *attr);
void spu_remove_sysdev_attr(struct sysdev_attribute *attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs);
void spu_remove_sysdev_attr_group(struct attribute_group *attrs);

int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
                unsigned long dsisr, unsigned *flt);

/*
 * Notifier blocks:
 *
 * oprofile can get notified when a context switch is performed
 * on an SPE. The notifier function that gets called is passed
 * a pointer to the SPU structure as well as the object-id that
 * identifies the binary running on that SPU now.
 *
 * For a context save, the object-id that is passed is zero,
 * identifying that the kernel will run from that moment on.
 *
 * For a context restore, the object-id is the value written
 * to the object-id spufs file from user space and the notifier
 * function can assume that spu->ctx is valid.
 */
struct notifier_block;
int spu_switch_event_register(struct notifier_block *n);
int spu_switch_event_unregister(struct notifier_block *n);
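
/*
 * Example (illustrative sketch, not part of the original interface):
 * registering for the context-switch notifications described above.  The
 * callback follows the standard notifier convention; treating the second
 * argument as the object-id and the third as the struct spu pointer is an
 * assumption based on the comment above.
 *
 *	static int my_switch_notify(struct notifier_block *nb,
 *				    unsigned long object_id, void *data)
 *	{
 *		struct spu *spu = data;
 *
 *		pr_debug("spu %d now runs object %lx\n",
 *			 spu->number, object_id);
 *		return 0;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_switch_notify,
 *	};
 *
 *	spu_switch_event_register(&my_nb);
 */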

extern void notify_spus_active(void);
extern void do_notify_spus_active(void);

/*
 * This defines the Local Store, Problem Area and Privilege Area of an SPU.
 */

union mfc_tag_size_class_cmd {
        struct {
                u16 mfc_size;
                u16 mfc_tag;
                u8  pad;
                u8  mfc_rclassid;
                u16 mfc_cmd;
        } u;
        struct {
                u32 mfc_size_tag32;
                u32 mfc_class_cmd32;
        } by32;
        u64 all64;
};
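
/*
 * Example (illustrative sketch, not part of the original interface):
 * filling in the union for a 4 KB MFC_GET_CMD transfer on tag group 1.
 * The by32 and all64 views alias the same bits, so the assembled command
 * word can then be accessed as one or two words when it is written out.
 *
 *	union mfc_tag_size_class_cmd cmd = { 0 };
 *
 *	cmd.u.mfc_size	   = 0x1000;
 *	cmd.u.mfc_tag	   = 1;
 *	cmd.u.mfc_rclassid = 0;
 *	cmd.u.mfc_cmd	   = MFC_GET_CMD;
 */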

struct mfc_cq_sr {
        u64 mfc_cq_data0_RW;
        u64 mfc_cq_data1_RW;
        u64 mfc_cq_data2_RW;
        u64 mfc_cq_data3_RW;
};

struct spu_problem {
#define MS_SYNC_PENDING         1L
        u64 spc_mssync_RW;                                      /* 0x0000 */
        u8  pad_0x0008_0x3000[0x3000 - 0x0008];

        /* DMA Area */
        u8  pad_0x3000_0x3004[0x4];                             /* 0x3000 */
        u32 mfc_lsa_W;                                          /* 0x3004 */
        u64 mfc_ea_W;                                           /* 0x3008 */
        union mfc_tag_size_class_cmd mfc_union_W;               /* 0x3010 */
        u8  pad_0x3018_0x3104[0xec];                            /* 0x3018 */
        u32 dma_qstatus_R;                                      /* 0x3104 */
        u8  pad_0x3108_0x3204[0xfc];                            /* 0x3108 */
        u32 dma_querytype_RW;                                   /* 0x3204 */
        u8  pad_0x3208_0x321c[0x14];                            /* 0x3208 */
        u32 dma_querymask_RW;                                   /* 0x321c */
        u8  pad_0x3220_0x322c[0xc];                             /* 0x3220 */
        u32 dma_tagstatus_R;                                    /* 0x322c */
#define DMA_TAGSTATUS_INTR_ANY  1u
#define DMA_TAGSTATUS_INTR_ALL  2u
        u8  pad_0x3230_0x4000[0x4000 - 0x3230];                 /* 0x3230 */

        /* SPU Control Area */
        u8  pad_0x4000_0x4004[0x4];                             /* 0x4000 */
        u32 pu_mb_R;                                            /* 0x4004 */
        u8  pad_0x4008_0x400c[0x4];                             /* 0x4008 */
        u32 spu_mb_W;                                           /* 0x400c */
        u8  pad_0x4010_0x4014[0x4];                             /* 0x4010 */
        u32 mb_stat_R;                                          /* 0x4014 */
        u8  pad_0x4018_0x401c[0x4];                             /* 0x4018 */
        u32 spu_runcntl_RW;                                     /* 0x401c */
#define SPU_RUNCNTL_STOP        0L
#define SPU_RUNCNTL_RUNNABLE    1L
#define SPU_RUNCNTL_ISOLATE     2L
        u8  pad_0x4020_0x4024[0x4];                             /* 0x4020 */
        u32 spu_status_R;                                       /* 0x4024 */
#define SPU_STOP_STATUS_SHIFT           16
#define SPU_STATUS_STOPPED              0x0
#define SPU_STATUS_RUNNING              0x1
#define SPU_STATUS_STOPPED_BY_STOP      0x2
#define SPU_STATUS_STOPPED_BY_HALT      0x4
#define SPU_STATUS_WAITING_FOR_CHANNEL  0x8
#define SPU_STATUS_SINGLE_STEP          0x10
#define SPU_STATUS_INVALID_INSTR        0x20
#define SPU_STATUS_INVALID_CH           0x40
#define SPU_STATUS_ISOLATED_STATE       0x80
#define SPU_STATUS_ISOLATED_LOAD_STATUS 0x200
#define SPU_STATUS_ISOLATED_EXIT_STATUS 0x400
        u8  pad_0x4028_0x402c[0x4];                             /* 0x4028 */
        u32 spu_spe_R;                                          /* 0x402c */
        u8  pad_0x4030_0x4034[0x4];                             /* 0x4030 */
        u32 spu_npc_RW;                                         /* 0x4034 */
        u8  pad_0x4038_0x14000[0x14000 - 0x4038];               /* 0x4038 */

        /* Signal Notification Area */
        u8  pad_0x14000_0x1400c[0xc];                           /* 0x14000 */
        u32 signal_notify1;                                     /* 0x1400c */
        u8  pad_0x14010_0x1c00c[0x7ffc];                        /* 0x14010 */
        u32 signal_notify2;                                     /* 0x1c00c */
} __attribute__ ((aligned(0x20000)));
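
/*
 * Example (illustrative sketch, not part of the original interface):
 * starting an SPU through the problem-state area mapped above and
 * decoding a stop-and-signal code from the status word.  "prob", "npc",
 * "status" and "stop_code" are hypothetical variables; "prob" would be a
 * struct spu_problem __iomem pointer.
 *
 *	out_be32(&prob->spu_npc_RW, npc);
 *	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
 *
 *	status = in_be32(&prob->spu_status_R);
 *	if (status & SPU_STATUS_STOPPED_BY_STOP)
 *		stop_code = status >> SPU_STOP_STATUS_SHIFT;
 */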

/* SPU Privilege 2 State Area */
struct spu_priv2 {
        /* MFC Registers */
        u8  pad_0x0000_0x1100[0x1100 - 0x0000];                 /* 0x0000 */

        /* SLB Management Registers */
        u8  pad_0x1100_0x1108[0x8];                             /* 0x1100 */
        u64 slb_index_W;                                        /* 0x1108 */
#define SLB_INDEX_MASK                          0x7L
        u64 slb_esid_RW;                                        /* 0x1110 */
        u64 slb_vsid_RW;                                        /* 0x1118 */
#define SLB_VSID_SUPERVISOR_STATE       (0x1ull << 11)
#define SLB_VSID_SUPERVISOR_STATE_MASK  (0x1ull << 11)
#define SLB_VSID_PROBLEM_STATE          (0x1ull << 10)
#define SLB_VSID_PROBLEM_STATE_MASK     (0x1ull << 10)
#define SLB_VSID_EXECUTE_SEGMENT        (0x1ull << 9)
#define SLB_VSID_NO_EXECUTE_SEGMENT     (0x1ull << 9)
#define SLB_VSID_EXECUTE_SEGMENT_MASK   (0x1ull << 9)
#define SLB_VSID_4K_PAGE                (0x0 << 8)
#define SLB_VSID_LARGE_PAGE             (0x1ull << 8)
#define SLB_VSID_PAGE_SIZE_MASK         (0x1ull << 8)
#define SLB_VSID_CLASS_MASK             (0x1ull << 7)
#define SLB_VSID_VIRTUAL_PAGE_SIZE_MASK (0x1ull << 6)
        u64 slb_invalidate_entry_W;                             /* 0x1120 */
        u64 slb_invalidate_all_W;                               /* 0x1128 */
        u8  pad_0x1130_0x2000[0x2000 - 0x1130];                 /* 0x1130 */

        /* Context Save / Restore Area */
        struct mfc_cq_sr spuq[16];                              /* 0x2000 */
        struct mfc_cq_sr puq[8];                                /* 0x2200 */
        u8  pad_0x2300_0x3000[0x3000 - 0x2300];                 /* 0x2300 */

        /* MFC Control */
        u64 mfc_control_RW;                                     /* 0x3000 */
#define MFC_CNTL_RESUME_DMA_QUEUE               (0ull << 0)
#define MFC_CNTL_SUSPEND_DMA_QUEUE              (1ull << 0)
#define MFC_CNTL_SUSPEND_DMA_QUEUE_MASK         (1ull << 0)
#define MFC_CNTL_SUSPEND_MASK                   (1ull << 4)
#define MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION     (0ull << 8)
#define MFC_CNTL_SUSPEND_IN_PROGRESS            (1ull << 8)
#define MFC_CNTL_SUSPEND_COMPLETE               (3ull << 8)
#define MFC_CNTL_SUSPEND_DMA_STATUS_MASK        (3ull << 8)
#define MFC_CNTL_DMA_QUEUES_EMPTY               (1ull << 14)
#define MFC_CNTL_DMA_QUEUES_EMPTY_MASK          (1ull << 14)
#define MFC_CNTL_PURGE_DMA_REQUEST              (1ull << 15)
#define MFC_CNTL_PURGE_DMA_IN_PROGRESS          (1ull << 24)
#define MFC_CNTL_PURGE_DMA_COMPLETE             (3ull << 24)
#define MFC_CNTL_PURGE_DMA_STATUS_MASK          (3ull << 24)
#define MFC_CNTL_RESTART_DMA_COMMAND            (1ull << 32)
#define MFC_CNTL_DMA_COMMAND_REISSUE_PENDING    (1ull << 32)
#define MFC_CNTL_DMA_COMMAND_REISSUE_STATUS_MASK (1ull << 32)
#define MFC_CNTL_MFC_PRIVILEGE_STATE            (2ull << 33)
#define MFC_CNTL_MFC_PROBLEM_STATE              (3ull << 33)
#define MFC_CNTL_MFC_KEY_PROTECTION_STATE_MASK  (3ull << 33)
#define MFC_CNTL_DECREMENTER_HALTED             (1ull << 35)
#define MFC_CNTL_DECREMENTER_RUNNING            (1ull << 40)
#define MFC_CNTL_DECREMENTER_STATUS_MASK        (1ull << 40)
        u8  pad_0x3008_0x4000[0x4000 - 0x3008];                 /* 0x3008 */

        /* Interrupt Mailbox */
        u64 puint_mb_R;                                         /* 0x4000 */
        u8  pad_0x4008_0x4040[0x4040 - 0x4008];                 /* 0x4008 */

        /* SPU Control */
        u64 spu_privcntl_RW;                                    /* 0x4040 */
#define SPU_PRIVCNTL_MODE_NORMAL                (0x0ull << 0)
#define SPU_PRIVCNTL_MODE_SINGLE_STEP           (0x1ull << 0)
#define SPU_PRIVCNTL_MODE_MASK                  (0x1ull << 0)
#define SPU_PRIVCNTL_NO_ATTENTION_EVENT         (0x0ull << 1)
#define SPU_PRIVCNTL_ATTENTION_EVENT            (0x1ull << 1)
#define SPU_PRIVCNTL_ATTENTION_EVENT_MASK       (0x1ull << 1)
#define SPU_PRIVCNT_LOAD_REQUEST_NORMAL         (0x0ull << 2)
#define SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK    (0x1ull << 2)
        u8  pad_0x4048_0x4058[0x10];                            /* 0x4048 */
        u64 spu_lslr_RW;                                        /* 0x4058 */
        u64 spu_chnlcntptr_RW;                                  /* 0x4060 */
        u64 spu_chnlcnt_RW;                                     /* 0x4068 */
        u64 spu_chnldata_RW;                                    /* 0x4070 */
        u64 spu_cfg_RW;                                         /* 0x4078 */
        u8  pad_0x4080_0x5000[0x5000 - 0x4080];                 /* 0x4080 */

        /* PV2_ImplRegs: Implementation-specific privileged-state 2 regs */
        u64 spu_pm_trace_tag_status_RW;                         /* 0x5000 */
        u64 spu_tag_status_query_RW;                            /* 0x5008 */
#define TAG_STATUS_QUERY_CONDITION_BITS (0x3ull << 32)
#define TAG_STATUS_QUERY_MASK_BITS (0xffffffffull)
        u64 spu_cmd_buf1_RW;                                    /* 0x5010 */
#define SPU_COMMAND_BUFFER_1_LSA_BITS (0x7ffffull << 32)
#define SPU_COMMAND_BUFFER_1_EAH_BITS (0xffffffffull)
        u64 spu_cmd_buf2_RW;                                    /* 0x5018 */
#define SPU_COMMAND_BUFFER_2_EAL_BITS ((0xffffffffull) << 32)
#define SPU_COMMAND_BUFFER_2_TS_BITS (0xffffull << 16)
#define SPU_COMMAND_BUFFER_2_TAG_BITS (0x3full)
        u64 spu_atomic_status_RW;                               /* 0x5020 */
} __attribute__ ((aligned(0x20000)));
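
/*
 * Example (illustrative sketch, not part of the original interface):
 * suspending the MFC DMA queue and waiting for the suspend to take
 * effect, along the lines of what a context-save path would do.  "priv2"
 * is a hypothetical struct spu_priv2 __iomem pointer.
 *
 *	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
 *	while ((in_be64(&priv2->mfc_control_RW) &
 *		MFC_CNTL_SUSPEND_DMA_STATUS_MASK) != MFC_CNTL_SUSPEND_COMPLETE)
 *		cpu_relax();
 */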

/* SPU Privilege 1 State Area */
struct spu_priv1 {
        /* Control and Configuration Area */
        u64 mfc_sr1_RW;                                         /* 0x000 */
#define MFC_STATE1_LOCAL_STORAGE_DECODE_MASK    0x01ull
#define MFC_STATE1_BUS_TLBIE_MASK               0x02ull
#define MFC_STATE1_REAL_MODE_OFFSET_ENABLE_MASK 0x04ull
#define MFC_STATE1_PROBLEM_STATE_MASK           0x08ull
#define MFC_STATE1_RELOCATE_MASK                0x10ull
#define MFC_STATE1_MASTER_RUN_CONTROL_MASK      0x20ull
#define MFC_STATE1_TABLE_SEARCH_MASK            0x40ull
        u64 mfc_lpid_RW;                                        /* 0x008 */
        u64 spu_idr_RW;                                         /* 0x010 */
        u64 mfc_vr_RO;                                          /* 0x018 */
#define MFC_VERSION_BITS                (0xffff << 16)
#define MFC_REVISION_BITS               (0xffff)
#define MFC_GET_VERSION_BITS(vr)        (((vr) & MFC_VERSION_BITS) >> 16)
#define MFC_GET_REVISION_BITS(vr)       ((vr) & MFC_REVISION_BITS)
        u64 spu_vr_RO;                                          /* 0x020 */
#define SPU_VERSION_BITS                (0xffff << 16)
#define SPU_REVISION_BITS               (0xffff)
#define SPU_GET_VERSION_BITS(vr)        (((vr) & SPU_VERSION_BITS) >> 16)
#define SPU_GET_REVISION_BITS(vr)       ((vr) & SPU_REVISION_BITS)
        u8  pad_0x28_0x100[0x100 - 0x28];                       /* 0x28 */

        /* Interrupt Area */
        u64 int_mask_RW[3];                                     /* 0x100 */
#define CLASS0_ENABLE_DMA_ALIGNMENT_INTR                0x1L
#define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR          0x2L
#define CLASS0_ENABLE_SPU_ERROR_INTR                    0x4L
#define CLASS0_ENABLE_MFC_FIR_INTR                      0x8L
#define CLASS1_ENABLE_SEGMENT_FAULT_INTR                0x1L
#define CLASS1_ENABLE_STORAGE_FAULT_INTR                0x2L
#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR    0x4L
#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR    0x8L
#define CLASS2_ENABLE_MAILBOX_INTR                      0x1L
#define CLASS2_ENABLE_SPU_STOP_INTR                     0x2L
#define CLASS2_ENABLE_SPU_HALT_INTR                     0x4L
#define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR   0x8L
#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR            0x10L
        u8  pad_0x118_0x140[0x28];                              /* 0x118 */
        u64 int_stat_RW[3];                                     /* 0x140 */
#define CLASS0_DMA_ALIGNMENT_INTR                       0x1L
#define CLASS0_INVALID_DMA_COMMAND_INTR                 0x2L
#define CLASS0_SPU_ERROR_INTR                           0x4L
#define CLASS0_INTR_MASK                                0x7L
#define CLASS1_SEGMENT_FAULT_INTR                       0x1L
#define CLASS1_STORAGE_FAULT_INTR                       0x2L
#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR           0x4L
#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR           0x8L
#define CLASS1_INTR_MASK                                0xfL
#define CLASS2_MAILBOX_INTR                             0x1L
#define CLASS2_SPU_STOP_INTR                            0x2L
#define CLASS2_SPU_HALT_INTR                            0x4L
#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR          0x8L
#define CLASS2_MAILBOX_THRESHOLD_INTR                   0x10L
#define CLASS2_INTR_MASK                                0x1fL
        u8  pad_0x158_0x180[0x28];                              /* 0x158 */
        u64 int_route_RW;                                       /* 0x180 */

        /* Interrupt Routing */
        u8  pad_0x188_0x200[0x200 - 0x188];                     /* 0x188 */

        /* Atomic Unit Control Area */
        u64 mfc_atomic_flush_RW;                                /* 0x200 */
#define mfc_atomic_flush_enable                 0x1L
        u8  pad_0x208_0x280[0x78];                              /* 0x208 */
        u64 resource_allocation_groupID_RW;                     /* 0x280 */
        u64 resource_allocation_enable_RW;                      /* 0x288 */
        u8  pad_0x290_0x3c8[0x3c8 - 0x290];                     /* 0x290 */

        /* SPU_Cache_ImplRegs: Implementation-dependent cache registers */

        u64 smf_sbi_signal_sel;                                 /* 0x3c8 */
#define smf_sbi_mask_lsb        56
#define smf_sbi_shift           (63 - smf_sbi_mask_lsb)
#define smf_sbi_mask            (0x301LL << smf_sbi_shift)
#define smf_sbi_bus0_bits       (0x001LL << smf_sbi_shift)
#define smf_sbi_bus2_bits       (0x100LL << smf_sbi_shift)
#define smf_sbi2_bus0_bits      (0x201LL << smf_sbi_shift)
#define smf_sbi2_bus2_bits      (0x300LL << smf_sbi_shift)
        u64 smf_ato_signal_sel;                                 /* 0x3d0 */
#define smf_ato_mask_lsb        35
#define smf_ato_shift           (63 - smf_ato_mask_lsb)
#define smf_ato_mask            (0x3LL << smf_ato_shift)
#define smf_ato_bus0_bits       (0x2LL << smf_ato_shift)
#define smf_ato_bus2_bits       (0x1LL << smf_ato_shift)
        u8  pad_0x3d8_0x400[0x400 - 0x3d8];                     /* 0x3d8 */

        /* TLB Management Registers */
        u64 mfc_sdr_RW;                                         /* 0x400 */
        u8  pad_0x408_0x500[0xf8];                              /* 0x408 */
        u64 tlb_index_hint_RO;                                  /* 0x500 */
        u64 tlb_index_W;                                        /* 0x508 */
        u64 tlb_vpn_RW;                                         /* 0x510 */
        u64 tlb_rpn_RW;                                         /* 0x518 */
        u8  pad_0x520_0x540[0x20];                              /* 0x520 */
        u64 tlb_invalidate_entry_W;                             /* 0x540 */
        u64 tlb_invalidate_all_W;                               /* 0x548 */
        u8  pad_0x550_0x580[0x580 - 0x550];                     /* 0x550 */

        /* SPU_MMU_ImplRegs: Implementation-dependent MMU registers */
        u64 smm_hid;                                            /* 0x580 */
#define PAGE_SIZE_MASK          0xf000000000000000ull
#define PAGE_SIZE_16MB_64KB     0x2000000000000000ull
        u8  pad_0x588_0x600[0x600 - 0x588];                     /* 0x588 */

        /* MFC Status/Control Area */
        u64 mfc_accr_RW;                                        /* 0x600 */
#define MFC_ACCR_EA_ACCESS_GET          (1 << 0)
#define MFC_ACCR_EA_ACCESS_PUT          (1 << 1)
#define MFC_ACCR_LS_ACCESS_GET          (1 << 3)
#define MFC_ACCR_LS_ACCESS_PUT          (1 << 4)
        u8  pad_0x608_0x610[0x8];                               /* 0x608 */
        u64 mfc_dsisr_RW;                                       /* 0x610 */
#define MFC_DSISR_PTE_NOT_FOUND         (1 << 30)
#define MFC_DSISR_ACCESS_DENIED         (1 << 27)
#define MFC_DSISR_ATOMIC                (1 << 26)
#define MFC_DSISR_ACCESS_PUT            (1 << 25)
#define MFC_DSISR_ADDR_MATCH            (1 << 22)
#define MFC_DSISR_LS                    (1 << 17)
#define MFC_DSISR_L                     (1 << 16)
#define MFC_DSISR_ADDRESS_OVERFLOW      (1 << 0)
        u8  pad_0x618_0x620[0x8];                               /* 0x618 */
        u64 mfc_dar_RW;                                         /* 0x620 */
        u8  pad_0x628_0x700[0x700 - 0x628];                     /* 0x628 */

        /* Replacement Management Table (RMT) Area */
        u64 rmt_index_RW;                                       /* 0x700 */
        u8  pad_0x708_0x710[0x8];                               /* 0x708 */
        u64 rmt_data1_RW;                                       /* 0x710 */
        u8  pad_0x718_0x800[0x800 - 0x718];                     /* 0x718 */

        /* Control/Configuration Registers */
        u64 mfc_dsir_R;                                         /* 0x800 */
#define MFC_DSIR_Q                      (1 << 31)
#define MFC_DSIR_SPU_QUEUE              MFC_DSIR_Q
        u64 mfc_lsacr_RW;                                       /* 0x808 */
#define MFC_LSACR_COMPARE_MASK          ((~0ull) << 32)
#define MFC_LSACR_COMPARE_ADDR          ((~0ull) >> 32)
        u64 mfc_lscrr_R;                                        /* 0x810 */
#define MFC_LSCRR_Q                     (1 << 31)
#define MFC_LSCRR_SPU_QUEUE             MFC_LSCRR_Q
#define MFC_LSCRR_QI_SHIFT              32
#define MFC_LSCRR_QI_MASK               ((~0ull) << MFC_LSCRR_QI_SHIFT)
        u8  pad_0x818_0x820[0x8];                               /* 0x818 */
        u64 mfc_tclass_id_RW;                                   /* 0x820 */
#define MFC_TCLASS_ID_ENABLE            (1L << 0L)
#define MFC_TCLASS_SLOT2_ENABLE         (1L << 5L)
#define MFC_TCLASS_SLOT1_ENABLE         (1L << 6L)
#define MFC_TCLASS_SLOT0_ENABLE         (1L << 7L)
#define MFC_TCLASS_QUOTA_2_SHIFT        8L
#define MFC_TCLASS_QUOTA_1_SHIFT        16L
#define MFC_TCLASS_QUOTA_0_SHIFT        24L
#define MFC_TCLASS_QUOTA_2_MASK         (0x1FL << MFC_TCLASS_QUOTA_2_SHIFT)
#define MFC_TCLASS_QUOTA_1_MASK         (0x1FL << MFC_TCLASS_QUOTA_1_SHIFT)
#define MFC_TCLASS_QUOTA_0_MASK         (0x1FL << MFC_TCLASS_QUOTA_0_SHIFT)
        u8  pad_0x828_0x900[0x900 - 0x828];                     /* 0x828 */

        /* Real Mode Support Registers */
        u64 mfc_rm_boundary;                                    /* 0x900 */
        u8  pad_0x908_0x938[0x30];                              /* 0x908 */
        u64 smf_dma_signal_sel;                                 /* 0x938 */
#define mfc_dma1_mask_lsb       41
#define mfc_dma1_shift          (63 - mfc_dma1_mask_lsb)
#define mfc_dma1_mask           (0x3LL << mfc_dma1_shift)
#define mfc_dma1_bits           (0x1LL << mfc_dma1_shift)
#define mfc_dma2_mask_lsb       43
#define mfc_dma2_shift          (63 - mfc_dma2_mask_lsb)
#define mfc_dma2_mask           (0x3LL << mfc_dma2_shift)
#define mfc_dma2_bits           (0x1LL << mfc_dma2_shift)
        u8  pad_0x940_0xa38[0xf8];                              /* 0x940 */
        u64 smm_signal_sel;                                     /* 0xa38 */
#define smm_sig_mask_lsb        12
#define smm_sig_shift           (63 - smm_sig_mask_lsb)
#define smm_sig_mask            (0x3LL << smm_sig_shift)
#define smm_sig_bus0_bits       (0x2LL << smm_sig_shift)
#define smm_sig_bus2_bits       (0x1LL << smm_sig_shift)
        u8  pad_0xa40_0xc00[0xc00 - 0xa40];                     /* 0xa40 */

        /* DMA Command Error Area */
        u64 mfc_cer_R;                                          /* 0xc00 */
#define MFC_CER_Q               (1 << 31)
#define MFC_CER_SPU_QUEUE       MFC_CER_Q
        u8  pad_0xc08_0x1000[0x1000 - 0xc08];                   /* 0xc08 */

        /* PV1_ImplRegs: Implementation-dependent privileged-state 1 regs */
        /* DMA Command Error Area */
        u64 spu_ecc_cntl_RW;                                    /* 0x1000 */
#define SPU_ECC_CNTL_E                  (1ull << 0ull)
#define SPU_ECC_CNTL_ENABLE             SPU_ECC_CNTL_E
#define SPU_ECC_CNTL_DISABLE            (~SPU_ECC_CNTL_E & 1L)
#define SPU_ECC_CNTL_S                  (1ull << 1ull)
#define SPU_ECC_STOP_AFTER_ERROR        SPU_ECC_CNTL_S
#define SPU_ECC_CONTINUE_AFTER_ERROR    (~SPU_ECC_CNTL_S & 2L)
#define SPU_ECC_CNTL_B                  (1ull << 2ull)
#define SPU_ECC_BACKGROUND_ENABLE       SPU_ECC_CNTL_B
#define SPU_ECC_BACKGROUND_DISABLE      (~SPU_ECC_CNTL_B & 4L)
#define SPU_ECC_CNTL_I_SHIFT            3ull
#define SPU_ECC_CNTL_I_MASK             (3ull << SPU_ECC_CNTL_I_SHIFT)
#define SPU_ECC_WRITE_ALWAYS            (~SPU_ECC_CNTL_I & 12L)
#define SPU_ECC_WRITE_CORRECTABLE       (1ull << SPU_ECC_CNTL_I_SHIFT)
#define SPU_ECC_WRITE_UNCORRECTABLE     (3ull << SPU_ECC_CNTL_I_SHIFT)
#define SPU_ECC_CNTL_D                  (1ull << 5ull)
#define SPU_ECC_DETECTION_ENABLE        SPU_ECC_CNTL_D
#define SPU_ECC_DETECTION_DISABLE       (~SPU_ECC_CNTL_D & 32L)
        u64 spu_ecc_stat_RW;                                    /* 0x1008 */
#define SPU_ECC_CORRECTED_ERROR         (1ull << 0ul)
#define SPU_ECC_UNCORRECTED_ERROR       (1ull << 1ul)
#define SPU_ECC_SCRUB_COMPLETE          (1ull << 2ul)
#define SPU_ECC_SCRUB_IN_PROGRESS       (1ull << 3ul)
#define SPU_ECC_INSTRUCTION_ERROR       (1ull << 4ul)
#define SPU_ECC_DATA_ERROR              (1ull << 5ul)
#define SPU_ECC_DMA_ERROR               (1ull << 6ul)
#define SPU_ECC_STATUS_CNT_MASK         (256ull << 8)
        u64 spu_ecc_addr_RW;                                    /* 0x1010 */
        u64 spu_err_mask_RW;                                    /* 0x1018 */
#define SPU_ERR_ILLEGAL_INSTR           (1ull << 0ul)
#define SPU_ERR_ILLEGAL_CHANNEL         (1ull << 1ul)
        u8  pad_0x1020_0x1028[0x1028 - 0x1020];                 /* 0x1020 */

        /* SPU Debug-Trace Bus (DTB) Selection Registers */
        u64 spu_trig0_sel;                                      /* 0x1028 */
        u64 spu_trig1_sel;                                      /* 0x1030 */
        u64 spu_trig2_sel;                                      /* 0x1038 */
        u64 spu_trig3_sel;                                      /* 0x1040 */
        u64 spu_trace_sel;                                      /* 0x1048 */
#define spu_trace_sel_mask              0x1f1fLL
#define spu_trace_sel_bus0_bits         0x1000LL
#define spu_trace_sel_bus2_bits         0x0010LL
        u64 spu_event0_sel;                                     /* 0x1050 */
        u64 spu_event1_sel;                                     /* 0x1058 */
        u64 spu_event2_sel;                                     /* 0x1060 */
        u64 spu_event3_sel;                                     /* 0x1068 */
        u64 spu_trace_cntl;                                     /* 0x1070 */
} __attribute__ ((aligned(0x2000)));
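
/*
 * Example (illustrative sketch, not part of the original interface):
 * reading the MFC version register through a mapped privilege 1 area and
 * splitting it into version and revision fields.  "priv1" is a
 * hypothetical struct spu_priv1 __iomem pointer; platforms that hide
 * privilege 1 state behind firmware would go through their priv1
 * accessors instead of direct MMIO.
 *
 *	u64 vr = in_be64(&priv1->mfc_vr_RO);
 *
 *	pr_info("MFC version %lu, revision %lu\n",
 *		(unsigned long)MFC_GET_VERSION_BITS(vr),
 *		(unsigned long)MFC_GET_REVISION_BITS(vr));
 */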

#endif /* __KERNEL__ */
#endif