1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "cpu.h"
21#include "helper.h"
22
23
24
25#ifndef CONFIG_USER_ONLY
26
/* Read a longword from guest physical memory, sign-extended to 64 bits
   per the Alpha LDL semantics.  */
uint64_t helper_ldl_phys(uint64_t p)
{
    int32_t val = ldl_phys(p);
    return val;
}
31
/* Read a quadword from guest physical memory.  */
uint64_t helper_ldq_phys(uint64_t p)
{
    uint64_t val = ldq_phys(p);
    return val;
}
36
37uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)
38{
39 env->lock_addr = p;
40 return env->lock_value = (int32_t)ldl_phys(p);
41}
42
43uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
44{
45 env->lock_addr = p;
46 return env->lock_value = ldq_phys(p);
47}
48
/* Write a longword to guest physical memory.  */
void helper_stl_phys(uint64_t p, uint64_t v)
{
    stl_phys(p, v);
}
53
/* Write a quadword to guest physical memory.  */
void helper_stq_phys(uint64_t p, uint64_t v)
{
    stq_phys(p, v);
}
58
59uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
60{
61 uint64_t ret = 0;
62
63 if (p == env->lock_addr) {
64 int32_t old = ldl_phys(p);
65 if (old == (int32_t)env->lock_value) {
66 stl_phys(p, v);
67 ret = 1;
68 }
69 }
70 env->lock_addr = -1;
71
72 return ret;
73}
74
75uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
76{
77 uint64_t ret = 0;
78
79 if (p == env->lock_addr) {
80 uint64_t old = ldq_phys(p);
81 if (old == env->lock_value) {
82 stq_phys(p, v);
83 ret = 1;
84 }
85 }
86 env->lock_addr = -1;
87
88 return ret;
89}
90
/* Raise an unaligned-access exception for the access at ADDR.
   The trap arguments handed to the guest are: trap_arg0 = faulting
   virtual address, trap_arg1 = opcode field of the faulting insn,
   trap_arg2 = its Ra register field.
   NOTE(review): is_write and is_user are unused here — presumably the
   signature must match what the softmmu template expects; confirm.  */
static void do_unaligned_access(CPUAlphaState *env, target_ulong addr,
                                int is_write, int is_user, uintptr_t retaddr)
{
    uint64_t pc;
    uint32_t insn;

    if (retaddr) {
        /* Must run before env->pc is read below, so that pc reflects
           the faulting guest instruction rather than stale state.  */
        cpu_restore_state(env, retaddr);
    }

    pc = env->pc;
    insn = cpu_ldl_code(env, pc);

    env->trap_arg0 = addr;
    env->trap_arg1 = insn >> 26;        /* opcode: bits <31:26> */
    env->trap_arg2 = (insn >> 21) & 31; /* Ra: bits <25:21> */
    env->exception_index = EXCP_UNALIGN;
    env->error_code = 0;
    /* Does not return.  */
    cpu_loop_exit(env);
}
111
112void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
113 bool is_write, bool is_exec, int unused,
114 unsigned size)
115{
116 AlphaCPU *cpu = ALPHA_CPU(cs);
117 CPUAlphaState *env = &cpu->env;
118
119 env->trap_arg0 = addr;
120 env->trap_arg1 = is_write ? 1 : 0;
121 dynamic_excp(env, 0, EXCP_MCHK, 0);
122}
123
124#include "exec/softmmu_exec.h"
125
126#define MMUSUFFIX _mmu
127#define ALIGNED_ONLY
128
129#define SHIFT 0
130#include "exec/softmmu_template.h"
131
132#define SHIFT 1
133#include "exec/softmmu_template.h"
134
135#define SHIFT 2
136#include "exec/softmmu_template.h"
137
138#define SHIFT 3
139#include "exec/softmmu_template.h"
140
141
142
143
144
145void tlb_fill(CPUAlphaState *env, target_ulong addr, int is_write,
146 int mmu_idx, uintptr_t retaddr)
147{
148 int ret;
149
150 ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx);
151 if (unlikely(ret != 0)) {
152 if (retaddr) {
153 cpu_restore_state(env, retaddr);
154 }
155
156 cpu_loop_exit(env);
157 }
158}
159#endif
160