1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/slab.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/errno.h>
23#include <linux/err.h>
24
25#include <asm/outercache.h>
26#include <asm/cacheflush.h>
27
28#include "scm.h"
29
30#define SCM_ENOMEM -5
31#define SCM_EOPNOTSUPP -4
32#define SCM_EINVAL_ADDR -3
33#define SCM_EINVAL_ARG -2
34#define SCM_ERROR -1
35#define SCM_INTERRUPTED 1
36
37static DEFINE_MUTEX(scm_lock);
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63struct scm_command {
64 __le32 len;
65 __le32 buf_offset;
66 __le32 resp_hdr_offset;
67 __le32 id;
68 __le32 buf[0];
69};
70
71
72
73
74
75
76
/**
 * struct scm_response - header of an SCM response
 * @len: total length of the response
 * @buf_offset: byte offset from the start of this struct to the
 *              response payload (see scm_get_response_buffer())
 * @is_complete: set non-zero by the secure world when the response is
 *               ready; polled (after cache invalidation) in scm_call()
 */
struct scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};
82
83
84
85
86
87
88
89
90
91
92
93static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
94{
95 struct scm_command *cmd;
96 size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
97 resp_size;
98 u32 offset;
99
100 cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
101 if (cmd) {
102 cmd->len = cpu_to_le32(len);
103 offset = offsetof(struct scm_command, buf);
104 cmd->buf_offset = cpu_to_le32(offset);
105 cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
106 }
107 return cmd;
108}
109
110
111
112
113
114
115
/*
 * free_scm_command() - Release a buffer obtained from alloc_scm_command().
 * @cmd: command to free; NULL is a no-op (kfree semantics)
 */
static inline void free_scm_command(struct scm_command *cmd)
{
	kfree(cmd);
}
120
121
122
123
124
125
126
127static inline struct scm_response *scm_command_to_response(
128 const struct scm_command *cmd)
129{
130 return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
131}
132
133
134
135
136
137
138
139static inline void *scm_get_command_buffer(const struct scm_command *cmd)
140{
141 return (void *)cmd->buf;
142}
143
144
145
146
147
148
149
150static inline void *scm_get_response_buffer(const struct scm_response *rsp)
151{
152 return (void *)rsp + le32_to_cpu(rsp->buf_offset);
153}
154
155static int scm_remap_error(int err)
156{
157 pr_err("scm_call failed with error code %d\n", err);
158 switch (err) {
159 case SCM_ERROR:
160 return -EIO;
161 case SCM_EINVAL_ADDR:
162 case SCM_EINVAL_ARG:
163 return -EINVAL;
164 case SCM_EOPNOTSUPP:
165 return -EOPNOTSUPP;
166 case SCM_ENOMEM:
167 return -ENOMEM;
168 }
169 return -EINVAL;
170}
171
/*
 * smc() - Trap into the secure world to execute one SCM command.
 * @cmd_addr: physical address of the scm_command descriptor
 *
 * Loads r0 = 1 (presumably the firmware's "execute command" function id
 * -- confirm against the SCM ABI), r1 = pointer to a scratch context id,
 * r2 = @cmd_addr, then issues "smc #0".  If the secure world was
 * interrupted it returns SCM_INTERRUPTED and the SMC is simply
 * re-issued until the call completes.
 *
 * Return: the firmware's result code, left in r0 (negative on error).
 */
static u32 smc(u32 cmd_addr)
{
	int context_id;
	/* Pin arguments to the exact registers the SMC calling convention uses. */
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3");	/* r3 may be clobbered by the firmware */
	} while (r0 == SCM_INTERRUPTED);

	return r0;
}
195
/*
 * __scm_call() - Push a prepared command to the secure world.
 * @cmd: command descriptor built by scm_call()
 *
 * Cleans the whole command/response allocation out of the L1 and outer
 * caches so the secure world observes the data just written, then traps
 * via smc().  Negative firmware error codes are remapped to errnos.
 *
 * Callers serialize through scm_lock (see scm_call()).
 */
static int __scm_call(const struct scm_command *cmd)
{
	int ret;
	u32 cmd_addr = virt_to_phys(cmd);

	/*
	 * Flush the entire buffer (command and response regions) to the
	 * point of coherency before entering the secure world.
	 * NOTE(review): cmd->len is __le32 but is used without
	 * le32_to_cpu() -- harmless on little-endian ARM, worth
	 * confirming if big-endian support is ever added.
	 */
	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
	outer_flush_range(cmd_addr, cmd_addr + cmd->len);

	ret = smc(cmd_addr);
	if (ret < 0)
		ret = scm_remap_error(ret);

	return ret;
}
214
/*
 * scm_inv_range() - Invalidate the data cache over [start, end).
 * @start: virtual start address (rounded down to a cache line)
 * @end: virtual end address (rounded up to a cache line)
 *
 * Used after the secure world has written the response, so the CPU
 * re-reads the data from memory rather than stale cache lines.
 */
static void scm_inv_range(unsigned long start, unsigned long end)
{
	u32 cacheline_size, ctr;

	/*
	 * Read CTR (Cache Type Register) and derive the minimum d-cache
	 * line size in bytes: 4 << CTR[19:16].
	 */
	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
	cacheline_size = 4 << ((ctr >> 16) & 0xf);

	start = round_down(start, cacheline_size);
	end = round_up(end, cacheline_size);
	/* Invalidate the outer (L2) cache first, then the inner caches. */
	outer_inv_range(start, end);
	while (start < end) {
		/* DCIMVAC: invalidate d-cache line by virtual address. */
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
		     : "memory");
		start += cacheline_size;
	}
	/* Ensure the invalidations complete before any subsequent loads. */
	dsb();
	isb();
}
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
/**
 * scm_call() - Send an SCM command and wait for its response.
 * @svc_id: service identifier (encoded into bits [31:10] of the command id)
 * @cmd_id: command identifier (low bits of the command id)
 * @cmd_buf: command payload to send, or NULL if none
 * @cmd_len: length of @cmd_buf in bytes
 * @resp_buf: buffer to copy the response into, or NULL to discard it
 * @resp_len: length of @resp_buf in bytes
 *
 * Builds a combined command/response buffer, sends it to the secure
 * world under scm_lock, then busy-waits for the firmware to mark the
 * response complete before copying it out.
 *
 * Return: 0 on success, negative errno on failure.
 */
int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
	     void *resp_buf, size_t resp_len)
{
	int ret;
	struct scm_command *cmd;
	struct scm_response *rsp;
	unsigned long start, end;

	cmd = alloc_scm_command(cmd_len, resp_len);
	if (!cmd)
		return -ENOMEM;

	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Only one SCM call may be in flight at a time. */
	mutex_lock(&scm_lock);
	ret = __scm_call(cmd);
	mutex_unlock(&scm_lock);
	if (ret)
		goto out;

	rsp = scm_command_to_response(cmd);
	start = (unsigned long)rsp;

	/*
	 * The secure world writes is_complete when done.  Invalidate the
	 * response header's cache lines on every iteration so we observe
	 * that write.  NOTE(review): this polls with no timeout or
	 * cpu_relax(); a firmware that never completes would hang here.
	 */
	do {
		scm_inv_range(start, start + sizeof(*rsp));
	} while (!rsp->is_complete);

	/* Now invalidate (and copy out) the full response payload. */
	end = (unsigned long)scm_get_response_buffer(rsp) + resp_len;
	scm_inv_range(start, end);

	if (resp_buf)
		memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
out:
	free_scm_command(cmd);
	return ret;
}
290EXPORT_SYMBOL(scm_call);
291
/**
 * scm_get_version() - Query the secure world's interface version.
 *
 * Issues a direct SMC with r0 = 0x100 (presumably the firmware's
 * "get version" function id -- confirm against the SCM ABI) and a
 * scratch context id pointer in r1.  The version is returned by the
 * firmware in r1 and cached in a static so the SMC is only ever
 * issued once.
 *
 * Return: the firmware-reported version.
 */
u32 scm_get_version(void)
{
	int context_id;
	static u32 version = -1;	/* -1 == not queried yet; cached thereafter */
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	/* Fast path: the version never changes once read. */
	if (version != -1)
		return version;

	mutex_lock(&scm_lock);

	r0 = 0x1 << 8;
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3");	/* r2/r3 may be clobbered by the firmware */
	} while (r0 == SCM_INTERRUPTED);	/* retry if the call was preempted */

	version = r1;
	mutex_unlock(&scm_lock);

	return version;
}
326EXPORT_SYMBOL(scm_get_version);
327