linux/arch/powerpc/include/asm/lppaca.h
/*
 * lppaca.h
 * Copyright (C) 2001  Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#ifndef _ASM_POWERPC_LPPACA_H
#define _ASM_POWERPC_LPPACA_H
#ifdef __KERNEL__

/*
 * These definitions relate to hypervisors that only exist when using
 * a server type processor
 */
#ifdef CONFIG_PPC_BOOK3S

/*
 * This control block contains the data that is shared between the
 * hypervisor and the OS.
 */
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/types.h>
#include <asm/mmu.h>

/*
 * We only need statically allocated lppaca structs on legacy iSeries,
 * which supported at most 64 cpus.
 */
#define NR_LPPACAS      1

/*
 * The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
 * alignment is sufficient to prevent this
 */
struct lppaca {
        /* cacheline 1 contains read-only data */

        __be32  desc;                   /* Eye catcher 0xD397D781 */
        __be16  size;                   /* Size of this struct */
        u8      reserved1[3];
        u8      __old_status;           /* Old status, including shared proc */
        u8      reserved3[14];
        volatile __be32 dyn_hw_node_id; /* Dynamic hardware node id */
        volatile __be32 dyn_hw_proc_id; /* Dynamic hardware proc id */
        u8      reserved4[56];
        volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
                                          /* associativity change counters */
        u8      reserved5[32];

        /* cacheline 2 contains local read-write data */

        u8      reserved6[48];
        u8      cede_latency_hint;
        u8      ebb_regs_in_use;
        u8      reserved7[6];
        u8      dtl_enable_mask;        /* Dispatch Trace Log mask */
        u8      donate_dedicated_cpu;   /* Donate dedicated CPU cycles */
        u8      fpregs_in_use;
        u8      pmcregs_in_use;
        u8      reserved8[28];
        __be64  wait_state_cycles;      /* Wait cycles for this proc */
        u8      reserved9[28];
        __be16  slb_count;              /* # of SLBs to maintain */
        u8      idle;                   /* Indicate OS is idle */
        u8      vmxregs_in_use;

        /* cacheline 3 is shared with other processors */

        /*
         * This is the yield_count.  An "odd" value (low bit on) means that
         * the processor is yielded (either because of an OS yield or a
         * hypervisor preempt).  An even value implies that the processor is
         * currently executing.
         * NOTE: Even dedicated processor partitions can yield so this
         * field cannot be used to determine if we are shared or dedicated.
         */
        volatile __be32 yield_count;
        volatile __be32 dispersion_count; /* dispatch changed physical cpu */
        volatile __be64 cmo_faults;     /* CMO page fault count */
        volatile __be64 cmo_fault_time; /* CMO page fault time */
        u8      reserved10[104];

        /* cacheline 4-5 */

        __be32  page_ins;               /* CMO Hint - # page ins by OS */
        u8      reserved11[148];
        volatile __be64 dtl_idx;        /* Dispatch Trace Log head index */
        u8      reserved12[96];
} __attribute__((__aligned__(0x400)));

extern struct lppaca lppaca[];

#define lppaca_of(cpu)  (*paca[cpu].lppaca_ptr)
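
/*
 * Illustrative sketch only, not part of the shared-area layout defined
 * here: one way to apply the odd/even yield_count convention documented
 * in struct lppaca above.  The helper name is hypothetical, and it
 * assumes be32_to_cpu() and lppaca_of() are usable in the calling
 * context.
 */
static inline bool lppaca_vcpu_yielded_example(int cpu)
{
        /* Low bit set: the vcpu has ceded or been preempted by the HV. */
        return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}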

/*
 * We are using a non-architected field to determine if a partition is
 * shared or dedicated.  This currently works on both KVM and PHYP, but
 * we will have to transition to something better.
 */
#define LPPACA_OLD_SHARED_PROC          2

static inline bool lppaca_shared_proc(struct lppaca *l)
{
        return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
}
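
/*
 * Example only, with a hypothetical helper name: one kind of policy a
 * caller might gate on lppaca_shared_proc(), e.g. whether it is worth
 * giving cycles back to the hypervisor while spinning.
 */
static inline bool example_worth_conferring(int cpu)
{
        /* Conferring only helps when the partition shares physical cpus. */
        return lppaca_shared_proc(&lppaca_of(cpu));
}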

/*
 * SLB shadow buffer structure as defined in the PAPR.  The save_area
 * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
 * ESID is stored in the lower 64 bits, then the VSID.
 */
struct slb_shadow {
        __be32  persistent;             /* Number of persistent SLBs */
        __be32  buffer_length;          /* Total shadow buffer length */
        __be64  reserved;
        struct  {
                __be64  esid;
                __be64  vsid;
        } save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
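
/*
 * Illustrative sketch only: writing one shadowed SLB entry in the
 * ESID-then-VSID layout described above.  The function name is
 * hypothetical; real callers also take care to order the stores so the
 * hypervisor never observes a half-updated pair.
 */
static inline void example_set_slb_shadow(struct slb_shadow *s, int i,
                                          u64 esid, u64 vsid)
{
        s->save_area[i].esid = cpu_to_be64(esid);   /* first doubleword */
        s->save_area[i].vsid = cpu_to_be64(vsid);   /* second doubleword */
}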

/*
 * Layout of entries in the hypervisor's dispatch trace log buffer.
 */
struct dtl_entry {
        u8      dispatch_reason;
        u8      preempt_reason;
        __be16  processor_id;
        __be32  enqueue_to_dispatch_time;
        __be32  ready_to_enqueue_time;
        __be32  waiting_to_ready_time;
        __be64  timebase;
        __be64  fault_addr;
        __be64  srr0;
        __be64  srr1;
};

#define DISPATCH_LOG_BYTES      4096    /* bytes per cpu */
#define N_DISPATCH_LOG          (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))

extern struct kmem_cache *dtl_cache;
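
/*
 * Illustrative sketch only: the per-cpu log holds N_DISPATCH_LOG entries
 * and the hypervisor keeps advancing a monotonically increasing index
 * (see dtl_idx in struct lppaca), so a consumer maps that index onto a
 * ring slot.  'dtl_buf' and the helper name are hypothetical.
 */
static inline struct dtl_entry *example_dtl_slot(struct dtl_entry *dtl_buf,
                                                 u64 index)
{
        return &dtl_buf[index % N_DISPATCH_LOG];
}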

/*
 * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
 * reading from the dispatch trace log.  If other code wants to consume
 * DTL entries, it can set this pointer to a function that will get
 * called once for each DTL entry that gets processed.
 */
extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
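
/*
 * Illustrative sketch only: a secondary consumer hooks the dispatch trace
 * log by pointing dtl_consumer at a function with the prototype above.
 * The names below are hypothetical; real users coordinate with the CPU
 * accounting code that normally processes the log.
 */
static void example_dtl_consumer(struct dtl_entry *entry, u64 index)
{
        /* Inspect entry->dispatch_reason, entry->timebase, etc. here. */
}

static inline void example_enable_dtl_consumer(void)
{
        dtl_consumer = example_dtl_consumer;
}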

#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */