1#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
2#define _ASM_POWERPC_ASM_PROTOTYPES_H
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/threads.h>
16#include <asm/cacheflush.h>
17#include <asm/checksum.h>
18#include <linux/uaccess.h>
19#include <asm/epapr_hcalls.h>
20#include <asm/dcr.h>
21#include <asm/mmu_context.h>
22#include <asm/ultravisor-api.h>
23
24#include <uapi/asm/ucontext.h>
25
26
/* SMP: per-CPU current thread_info bookkeeping and the secondary-CPU C entry */
extern struct thread_info *current_set[NR_CPUS];
extern struct thread_info *secondary_ti;
void start_secondary(void *unused);
30
31
/* kexec: the paca used while switching kernels, and the copy/flush step */
struct paca_struct;
struct kimage;
extern struct paca_struct kexec_paca;
void kexec_copy_flush(struct kimage *image);
36
37
/* pseries hcall tracing: static key gating the entry/exit tracepoints */
extern struct static_key hcall_tracepoint_key;
void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
41
42
/*
 * Ultravisor calls: real implementation when PowerNV or secure-VM (SVM)
 * support is built in; otherwise an inline stub that reports the
 * ultravisor as unavailable (U_NOT_AVAILABLE from ultravisor-api.h).
 */
#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
long ucall_norets(unsigned long opcode, ...);
#else
static inline long ucall_norets(unsigned long opcode, ...)
{
	return U_NOT_AVAILABLE;
}
#endif
51
52
/*
 * OPAL firmware call entry: eight argument slots, the opcode, and the MSR
 * value to run the call with. NOTE(review): calling convention assumed from
 * the signature — confirm against the OPAL call wrapper implementation.
 */
int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
		    int64_t a4, int64_t a5, int64_t a6, int64_t a7,
		    int64_t opcode, uint64_t msr);
56
57
/* VMX copyloop helpers: bracket copy routines that use VMX registers */
int enter_vmx_usercopy(void);
int exit_vmx_usercopy(void);
int enter_vmx_copy(void);
void * exit_vmx_copy(void *dest);
62
63
/* Trap/exception C handlers, called from the low-level entry code */
long machine_check_early(struct pt_regs *regs);
long hmi_exception_realmode(struct pt_regs *regs);
void SMIException(struct pt_regs *regs);
void handle_hmi_exception(struct pt_regs *regs);
void instruction_breakpoint_exception(struct pt_regs *regs);
void RunModeException(struct pt_regs *regs);
void single_step_exception(struct pt_regs *regs);
void program_check_exception(struct pt_regs *regs);
void alignment_exception(struct pt_regs *regs);
void slb_miss_bad_addr(struct pt_regs *regs);
void StackOverflow(struct pt_regs *regs);
void nonrecoverable_exception(struct pt_regs *regs);
void kernel_fp_unavailable_exception(struct pt_regs *regs);
void altivec_unavailable_exception(struct pt_regs *regs);
void vsx_unavailable_exception(struct pt_regs *regs);
void fp_unavailable_tm(struct pt_regs *regs);
void altivec_unavailable_tm(struct pt_regs *regs);
void vsx_unavailable_tm(struct pt_regs *regs);
void facility_unavailable_exception(struct pt_regs *regs);
void TAUException(struct pt_regs *regs);
void altivec_assist_exception(struct pt_regs *regs);
void unrecoverable_exception(struct pt_regs *regs);
void kernel_bad_stack(struct pt_regs *regs);
void system_reset_exception(struct pt_regs *regs);
void machine_check_exception(struct pt_regs *regs);
void emulation_assist_interrupt(struct pt_regs *regs);
90
91
/* powerpc-specific syscalls and signal-context management */
long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     long ctx_size);
#ifdef CONFIG_PPC32
/* 32-bit-only entry points */
long sys_debug_setcontext(struct ucontext __user *ctx,
			  int ndbg, struct sig_dbg_op __user *dbg);
int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp);
unsigned long __init early_init(unsigned long dt_ptr);
void __init machine_init(u64 dt_ptr);
#endif

/* fadvise64_64 with the 64-bit offset/len split into high/low words */
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
		      u32 len_high, u32 len_low);
long sys_switch_endian(void);
/* interrupt replay/restore — presumably the lazy-IRQ machinery; verify */
notrace unsigned int __check_irq_replay(void);
void notrace restore_interrupts(void);
109
110
/* ptrace: syscall entry/exit tracing hooks */
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);

/* process state: restore math-facility and transactional-memory state */
void restore_math(struct pt_regs *regs);
void restore_tm_state(struct pt_regs *regs);
117
118
/*
 * prom_init: Open Firmware boot entry. Arguments are the register values
 * handed over at entry (r3/r4/r6/r7), plus pp and the kernel base address.
 */
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7,
			       unsigned long kbase);

/* setup: early C entry for the boot CPU and for secondary CPUs */
void __init early_setup(unsigned long dt_ptr);
void early_setup_secondary(void);

/* time accounting */
void accumulate_stolen_time(void);
130
131
/* libgcc-style 64-bit integer helpers (byte swap, shifts, compares) */
extern u64 __bswapdi2(u64);
extern s64 __lshrdi3(s64, int);
extern s64 __ashldi3(s64, int);
extern s64 __ashrdi3(s64, int);
extern int __cmpdi2(s64, s64);
extern int __ucmpdi2(u64, u64);
138
139
/* tracing: mcount stub and function-graph return preparation */
void _mcount(void);
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);

/* PowerNV POWER9: force/release SMT4 mode helpers */
void pnv_power9_force_smt4_catch(void);
void pnv_power9_force_smt4_release(void);
145
146
/* Transactional Memory: enable/disable/abort, and KVM PR TM state swap */
void tm_enable(void);
void tm_disable(void);
void tm_abort(uint8_t cause);

struct kvm_vcpu;
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
154
/*
 * KVM HV TM state save/restore: real implementations when TM is configured,
 * empty inline stubs otherwise so callers need no #ifdef of their own.
 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
#else
static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
				     bool preserve_nv) { }
static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
					bool preserve_nv) { }
#endif
164
/* KVM HV: host/guest PMU state switching and the P9 vcpu entry path */
void kvmhv_save_host_pmu(void);
void kvmhv_load_host_pmu(void);
void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);

int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);

/* H_SET_DABR / H_SET_XDABR hypercall handlers */
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
			unsigned long dabrx);
175
176
/* Patch sites: offsets into code that is modified at runtime, plus the
 * branch-cache/link-stack flush entry labels they point at.
 */
extern s32 patch__call_flush_branch_caches1;
extern s32 patch__call_flush_branch_caches2;
extern s32 patch__call_flush_branch_caches3;
extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
extern s32 patch__memset_nocache, patch__memcpy_nocache;

extern long flush_branch_caches;
extern long kvm_flush_link_stack;
187
188#endif
189