1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/slab.h>
20#include <linux/io.h>
21#include <linux/module.h>
22#include <linux/mutex.h>
23#include <linux/errno.h>
24#include <linux/err.h>
25#include <linux/qcom_scm.h>
26#include <linux/dma-mapping.h>
27
28#include "qcom_scm.h"
29
/*
 * Per-cpu cold/warm boot flag bits passed to the secure monitor.
 * The bit assignments are deliberately non-contiguous; they are
 * firmware-defined values, not derived from the cpu number
 * (NOTE(review): values taken as-is — verify against the SCM firmware
 * interface documentation before changing).
 */
#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
39
/**
 * struct qcom_scm_entry - per-cpu warm boot bookkeeping
 * @flag:  the QCOM_SCM_FLAG_WARMBOOT_* bit for this cpu
 * @entry: warm boot entry point most recently programmed for this cpu,
 *         used to skip redundant firmware calls
 */
struct qcom_scm_entry {
	int flag;
	void *entry;
};
44
/* Warm boot state, indexed by cpu number (cpus 0-3). */
static struct qcom_scm_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};
51
52static DEFINE_MUTEX(qcom_scm_lock);
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78struct qcom_scm_command {
79 __le32 len;
80 __le32 buf_offset;
81 __le32 resp_hdr_offset;
82 __le32 id;
83 __le32 buf[0];
84};
85
86
87
88
89
90
91
/**
 * struct qcom_scm_response - header of an SCM response buffer
 * @len:         total available memory for the response
 * @buf_offset:  offset of the response payload from the start of this
 *               header
 * @is_complete: non-zero once the secure world has finished processing
 *               the command (polled by qcom_scm_call())
 */
struct qcom_scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};
97
98
99
100
101
102
103
104static inline struct qcom_scm_response *qcom_scm_command_to_response(
105 const struct qcom_scm_command *cmd)
106{
107 return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
108}
109
110
111
112
113
114
115
116static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
117{
118 return (void *)cmd->buf;
119}
120
121
122
123
124
125
126
127static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
128{
129 return (void *)rsp + le32_to_cpu(rsp->buf_offset);
130}
131
/*
 * smc() - trap into the secure world to run one SCM command.
 * @cmd_addr: bus address of a mapped struct qcom_scm_command
 *
 * Loads function id 1 into r0, a pointer to a scratch context word
 * into r1 and the command address into r2, then executes the SMC
 * instruction.  The call is reissued for as long as the secure world
 * reports QCOM_SCM_INTERRUPTED.  Returns whatever the secure world
 * left in r0.
 *
 * NOTE: the register-variable bindings and the __asmeq checks are what
 * guarantee the arguments land in the right registers — do not reorder.
 */
static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
}
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
/**
 * qcom_scm_call() - Send an SCM command and wait for its response
 * @dev:      device used for DMA mapping
 * @svc_id:   service identifier
 * @cmd_id:   command identifier
 * @cmd_buf:  command payload, may be NULL if @cmd_len is 0
 * @cmd_len:  length of the command payload
 * @resp_buf: response payload destination, may be NULL
 * @resp_len: length of the response payload
 *
 * Builds a command/response buffer in one allocation, maps it for the
 * secure world, issues the SMC under qcom_scm_lock, then polls the
 * response header until the firmware marks it complete and copies the
 * response payload out.
 *
 * Returns 0 on success or a negative errno (firmware errors are
 * translated by qcom_scm_remap_error()).
 */
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
			 const void *cmd_buf, size_t cmd_len, void *resp_buf,
			 size_t resp_len)
{
	int ret;
	struct qcom_scm_command *cmd;
	struct qcom_scm_response *rsp;
	/* header + command payload + response header + response payload */
	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
	dma_addr_t cmd_phys;

	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->len = cpu_to_le32(alloc_len);
	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);

	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	rsp = qcom_scm_command_to_response(cmd);

	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cmd_phys)) {
		kfree(cmd);
		return -ENOMEM;
	}

	/* Only one outstanding command at a time. */
	mutex_lock(&qcom_scm_lock);
	ret = smc(cmd_phys);
	if (ret < 0)
		ret = qcom_scm_remap_error(ret);
	mutex_unlock(&qcom_scm_lock);
	if (ret)
		goto out;

	/*
	 * Busy-wait for the firmware to flag completion, re-syncing the
	 * response header from device memory on every pass so the CPU
	 * sees the firmware's writes.
	 */
	do {
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
					sizeof(*rsp), DMA_FROM_DEVICE);
	} while (!rsp->is_complete);

	if (resp_buf) {
		/* Sync the payload region before copying it out. */
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
					le32_to_cpu(rsp->buf_offset),
					resp_len, DMA_FROM_DEVICE);
		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
		       resp_len);
	}
out:
	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
	kfree(cmd);
	return ret;
}
230
#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
/*
 * Encode an atomic ("register based") SCM function id: service and
 * command in the upper bits, argument count in the low nibble.
 * All macro arguments are parenthesized to keep expansion safe.
 */
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				((n) & 0xf))
237
238
239
240
241
242
243
244
245
246
/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc:  service identifier
 * @cmd:  command identifier
 * @arg1: first and only argument
 *
 * Register-based SCM call that does not sleep and does not take
 * qcom_scm_lock.  Returns the value the secure world leaves in r0.
 *
 * NOTE: register bindings and __asmeq checks pin the calling
 * convention — do not reorder.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	int context_id;

	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
		".arch_extension sec\n"
#endif
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}
269
270
271
272
273
274
275
276
277
278
279
/**
 * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc:  service identifier
 * @cmd:  command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * Register-based SCM call that does not sleep and does not take
 * qcom_scm_lock.  Returns the value the secure world leaves in r0.
 * Unlike the one-argument variant, r3 carries @arg2 and so is an input
 * rather than a clobber.
 */
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
	int context_id;

	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;
	register u32 r3 asm("r3") = arg2;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		__asmeq("%4", "r3")
#ifdef REQUIRES_SEC
		".arch_extension sec\n"
#endif
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
		);
	return r0;
}
304
/**
 * qcom_scm_get_version() - Query and cache the secure world version
 *
 * Issues function id (0x1 << 8) with a pointer to a scratch context
 * word; the version comes back in r1.  The result is cached in a
 * static so the SMC is only ever issued once; subsequent calls return
 * the cached value without taking qcom_scm_lock.
 *
 * Return: the firmware-reported version word.
 */
u32 qcom_scm_get_version(void)
{
	int context_id;
	static u32 version = -1;	/* -1 == not yet queried */
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	if (version != -1)
		return version;

	mutex_lock(&qcom_scm_lock);

	r0 = 0x1 << 8;
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;
	mutex_unlock(&qcom_scm_lock);

	return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);
340
341
342
343
344
345
346
347
348
349int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
350{
351 int flags = 0;
352 int cpu;
353 int scm_cb_flags[] = {
354 QCOM_SCM_FLAG_COLDBOOT_CPU0,
355 QCOM_SCM_FLAG_COLDBOOT_CPU1,
356 QCOM_SCM_FLAG_COLDBOOT_CPU2,
357 QCOM_SCM_FLAG_COLDBOOT_CPU3,
358 };
359
360 if (!cpus || (cpus && cpumask_empty(cpus)))
361 return -EINVAL;
362
363 for_each_cpu(cpu, cpus) {
364 if (cpu < ARRAY_SIZE(scm_cb_flags))
365 flags |= scm_cb_flags[cpu];
366 else
367 set_cpu_present(cpu, false);
368 }
369
370 return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
371 flags, virt_to_phys(entry));
372}
373
374
375
376
377
378
379
380
381
382int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
383 const cpumask_t *cpus)
384{
385 int ret;
386 int flags = 0;
387 int cpu;
388 struct {
389 __le32 flags;
390 __le32 addr;
391 } cmd;
392
393
394
395
396
397 for_each_cpu(cpu, cpus) {
398 if (entry == qcom_scm_wb[cpu].entry)
399 continue;
400 flags |= qcom_scm_wb[cpu].flag;
401 }
402
403
404 if (!flags)
405 return 0;
406
407 cmd.addr = cpu_to_le32(virt_to_phys(entry));
408 cmd.flags = cpu_to_le32(flags);
409 ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
410 &cmd, sizeof(cmd), NULL, 0);
411 if (!ret) {
412 for_each_cpu(cpu, cpus)
413 qcom_scm_wb[cpu].entry = entry;
414 }
415
416 return ret;
417}
418
419
420
421
422
423
424
425
426
/**
 * __qcom_scm_cpu_power_down() - Ask the secure world to power down this cpu
 * @flags: power-down flags; only the bits in QCOM_SCM_FLUSH_FLAG_MASK
 *         are forwarded to the firmware
 *
 * Issues the atomic TERMINATE_PC call.  The call's effect (taking the
 * cpu down) is owned by the firmware; this wrapper itself returns
 * nothing and ignores the firmware's return value.
 */
void __qcom_scm_cpu_power_down(u32 flags)
{
	qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
			flags & QCOM_SCM_FLUSH_FLAG_MASK);
}
432
433int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
434{
435 int ret;
436 __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
437 __le32 ret_val = 0;
438
439 ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
440 &svc_cmd, sizeof(svc_cmd), &ret_val,
441 sizeof(ret_val));
442 if (ret)
443 return ret;
444
445 return le32_to_cpu(ret_val);
446}
447
448int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
449 u32 req_cnt, u32 *resp)
450{
451 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
452 return -ERANGE;
453
454 return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
455 req, req_cnt * sizeof(*req), resp, sizeof(*resp));
456}
457
/*
 * No per-probe setup is needed for the legacy convention; this stub
 * presumably exists to satisfy the common qcom_scm probe path — verify
 * against the caller in qcom_scm.c.
 */
void __qcom_scm_init(void)
{
}
461
462bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
463{
464 __le32 out;
465 __le32 in;
466 int ret;
467
468 in = cpu_to_le32(peripheral);
469 ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
470 QCOM_SCM_PAS_IS_SUPPORTED_CMD,
471 &in, sizeof(in),
472 &out, sizeof(out));
473
474 return ret ? false : !!out;
475}
476
477int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
478 dma_addr_t metadata_phys)
479{
480 __le32 scm_ret;
481 int ret;
482 struct {
483 __le32 proc;
484 __le32 image_addr;
485 } request;
486
487 request.proc = cpu_to_le32(peripheral);
488 request.image_addr = cpu_to_le32(metadata_phys);
489
490 ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
491 QCOM_SCM_PAS_INIT_IMAGE_CMD,
492 &request, sizeof(request),
493 &scm_ret, sizeof(scm_ret));
494
495 return ret ? : le32_to_cpu(scm_ret);
496}
497
498int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
499 phys_addr_t addr, phys_addr_t size)
500{
501 __le32 scm_ret;
502 int ret;
503 struct {
504 __le32 proc;
505 __le32 addr;
506 __le32 len;
507 } request;
508
509 request.proc = cpu_to_le32(peripheral);
510 request.addr = cpu_to_le32(addr);
511 request.len = cpu_to_le32(size);
512
513 ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
514 QCOM_SCM_PAS_MEM_SETUP_CMD,
515 &request, sizeof(request),
516 &scm_ret, sizeof(scm_ret));
517
518 return ret ? : le32_to_cpu(scm_ret);
519}
520
521int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
522{
523 __le32 out;
524 __le32 in;
525 int ret;
526
527 in = cpu_to_le32(peripheral);
528 ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
529 QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
530 &in, sizeof(in),
531 &out, sizeof(out));
532
533 return ret ? : le32_to_cpu(out);
534}
535
536int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
537{
538 __le32 out;
539 __le32 in;
540 int ret;
541
542 in = cpu_to_le32(peripheral);
543 ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
544 QCOM_SCM_PAS_SHUTDOWN_CMD,
545 &in, sizeof(in),
546 &out, sizeof(out));
547
548 return ret ? : le32_to_cpu(out);
549}
550
551int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
552{
553 __le32 out;
554 __le32 in = cpu_to_le32(reset);
555 int ret;
556
557 ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
558 &in, sizeof(in),
559 &out, sizeof(out));
560
561 return ret ? : le32_to_cpu(out);
562}
563
564int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
565{
566 struct {
567 __le32 state;
568 __le32 id;
569 } req;
570 __le32 scm_ret = 0;
571 int ret;
572
573 req.state = cpu_to_le32(state);
574 req.id = cpu_to_le32(id);
575
576 ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
577 &req, sizeof(req), &scm_ret, sizeof(scm_ret));
578
579 return ret ? : le32_to_cpu(scm_ret);
580}
581
/* Not implemented for the legacy 32-bit convention. */
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
			       u32 spare)
{
	return -ENODEV;
}
587
/* Not implemented for the legacy 32-bit convention. */
int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
				      size_t *size)
{
	return -ENODEV;
}
593
/* Not implemented for the legacy 32-bit convention. */
int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
				      u32 spare)
{
	return -ENODEV;
}
599