/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
#define _ASM_POWERPC_ASM_PROTOTYPES_H
/*
 * _asm_prototypes.h: Various C-prototypes for assembly functions.
 *
 * Copyright 2016, Daniel Axtens, IBM Corporation.
 */

#include <linux/threads.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <linux/uaccess.h>
#include <asm/epapr_hcalls.h>
#include <asm/dcr.h>
#include <asm/mmu_context.h>
#include <asm/ultravisor-api.h>

#include <uapi/asm/ucontext.h>

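/* SMP */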
extern struct task_struct *current_set[NR_CPUS];
extern struct task_struct *secondary_current;
void start_secondary(void *unused);

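/* kexec */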
struct paca_struct;
struct kimage;
extern struct paca_struct kexec_paca;
void kexec_copy_flush(struct kimage *image);

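/* pseries hcall tracing */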
extern struct static_key hcall_tracepoint_key;
void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);

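/* Ultravisor */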
#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
long ucall_norets(unsigned long opcode, ...);
#else
static inline long ucall_norets(unsigned long opcode, ...)
{
	return U_NOT_AVAILABLE;
}
#endif

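/* OPAL */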
int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
		    int64_t a4, int64_t a5, int64_t a6, int64_t a7,
		    int64_t opcode, uint64_t msr);

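/* VMX copying */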
int enter_vmx_usercopy(void);
int exit_vmx_usercopy(void);
int enter_vmx_ops(void);
void *exit_vmx_ops(void *dest);

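/* Traps */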
long machine_check_early(struct pt_regs *regs);
long hmi_exception_realmode(struct pt_regs *regs);
void SMIException(struct pt_regs *regs);
void handle_hmi_exception(struct pt_regs *regs);
void instruction_breakpoint_exception(struct pt_regs *regs);
void RunModeException(struct pt_regs *regs);
void single_step_exception(struct pt_regs *regs);
void program_check_exception(struct pt_regs *regs);
void alignment_exception(struct pt_regs *regs);
void StackOverflow(struct pt_regs *regs);
void kernel_fp_unavailable_exception(struct pt_regs *regs);
void altivec_unavailable_exception(struct pt_regs *regs);
void vsx_unavailable_exception(struct pt_regs *regs);
void fp_unavailable_tm(struct pt_regs *regs);
void altivec_unavailable_tm(struct pt_regs *regs);
void vsx_unavailable_tm(struct pt_regs *regs);
void facility_unavailable_exception(struct pt_regs *regs);
void TAUException(struct pt_regs *regs);
void altivec_assist_exception(struct pt_regs *regs);
void unrecoverable_exception(struct pt_regs *regs);
void kernel_bad_stack(struct pt_regs *regs);
void system_reset_exception(struct pt_regs *regs);
void machine_check_exception(struct pt_regs *regs);
void emulation_assist_interrupt(struct pt_regs *regs);
long do_slb_fault(struct pt_regs *regs, unsigned long ea);
void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);

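/* signals, syscalls and interrupts */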
long sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    long ctx_size);
#ifdef CONFIG_PPC32
long sys_debug_setcontext(struct ucontext __user *ctx,
			  int ndbg, struct sig_dbg_op __user *dbg);
int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
	   struct __kernel_old_timeval __user *tvp);
unsigned long __init early_init(unsigned long dt_ptr);
void __init machine_init(u64 dt_ptr);
#endif

long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
		      u32 len_high, u32 len_low);
long sys_switch_endian(void);
notrace unsigned int __check_irq_replay(void);
void notrace restore_interrupts(void);

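/* ptrace */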
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);

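/* process */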
void restore_math(struct pt_regs *regs);
void restore_tm_state(struct pt_regs *regs);

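/* prom_init (OpenFirmware) */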
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7,
			       unsigned long kbase);

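/* setup */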
void __init early_setup(unsigned long dt_ptr);
void early_setup_secondary(void);

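/* time */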
void accumulate_stolen_time(void);

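/* misc runtime */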
extern u64 __bswapdi2(u64);
extern s64 __lshrdi3(s64, int);
extern s64 __ashldi3(s64, int);
extern s64 __ashrdi3(s64, int);
extern int __cmpdi2(s64, s64);
extern int __ucmpdi2(u64, u64);

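/* tracing */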
void _mcount(void);
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp);

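/* powernv: force the core into SMT4 mode and release it again (POWER9) */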
void pnv_power9_force_smt4_catch(void);
void pnv_power9_force_smt4_release(void);

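/* Transaction memory related */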
void tm_enable(void);
void tm_disable(void);
void tm_abort(uint8_t cause);

struct kvm_vcpu;
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);

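/* Patch sites */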
extern s32 patch__call_flush_count_cache;
extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
extern s32 patch__memset_nocache, patch__memcpy_nocache;

extern long flush_count_cache;
extern long kvm_flush_link_stack;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
#else
static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
				     bool preserve_nv) { }
static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
					bool preserve_nv) { }
#endif

void kvmhv_save_host_pmu(void);
void kvmhv_load_host_pmu(void);
void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);

int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);

long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
			unsigned long dabrx);

#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */