1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/platform_device.h>
15#include <linux/module.h>
16#include <linux/cpumask.h>
17#include <linux/export.h>
18#include <linux/dma-mapping.h>
19#include <linux/types.h>
20#include <linux/qcom_scm.h>
21#include <linux/of.h>
22#include <linux/of_platform.h>
23#include <linux/clk.h>
24#include <linux/reset-controller.h>
25
26#include "qcom_scm.h"
27
/*
 * Driver state for the SCM (Secure Channel Manager) firmware interface.
 * A single instance is created at probe time and published through __scm.
 */
struct qcom_scm {
	struct device *dev;		/* platform device backing the SCM calls */
	struct clk *core_clk;		/* optional; NULL when not provided in DT */
	struct clk *iface_clk;		/* required only for "qcom,scm" compatibles */
	struct clk *bus_clk;		/* required only for "qcom,scm" compatibles */
	struct reset_controller_dev reset;	/* exposes the MSS reset line */
};

/* Singleton set by qcom_scm_probe(); NULL until the device has probed. */
static struct qcom_scm *__scm;
37
38static int qcom_scm_clk_enable(void)
39{
40 int ret;
41
42 ret = clk_prepare_enable(__scm->core_clk);
43 if (ret)
44 goto bail;
45
46 ret = clk_prepare_enable(__scm->iface_clk);
47 if (ret)
48 goto disable_core;
49
50 ret = clk_prepare_enable(__scm->bus_clk);
51 if (ret)
52 goto disable_iface;
53
54 return 0;
55
56disable_iface:
57 clk_disable_unprepare(__scm->iface_clk);
58disable_core:
59 clk_disable_unprepare(__scm->core_clk);
60bail:
61 return ret;
62}
63
64static void qcom_scm_clk_disable(void)
65{
66 clk_disable_unprepare(__scm->core_clk);
67 clk_disable_unprepare(__scm->iface_clk);
68 clk_disable_unprepare(__scm->bus_clk);
69}
70
71
72
73
74
75
76
77
78
/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: entry point for the cpus when they cold boot
 * @cpus: cpus to set the cold boot address for
 *
 * Thin wrapper around the platform-specific __qcom_scm_set_cold_boot_addr().
 * Returns 0 on success, negative errno on failure.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
84
85
86
87
88
89
90
91
92
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: entry point for the cpus when they resume from a warm boot
 * @cpus: cpus to set the warm boot address for
 *
 * Thin wrapper around the platform-specific __qcom_scm_set_warm_boot_addr().
 * NOTE(review): unlike the cold boot variant, this dereferences __scm, so it
 * must not be called before the SCM device has probed — verify callers check
 * qcom_scm_is_available() first.
 *
 * Returns 0 on success, negative errno on failure.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
98
99
100
101
102
103
104
105
106
/**
 * qcom_scm_cpu_power_down() - Power down the calling cpu
 * @flags: flags forwarded to the platform-specific power-down call
 *
 * Thin wrapper around __qcom_scm_cpu_power_down(). Does not return a status;
 * on success the cpu does not return here until woken.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	__qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
112
113
114
115
116
117
118bool qcom_scm_hdcp_available(void)
119{
120 int ret = qcom_scm_clk_enable();
121
122 if (ret)
123 return ret;
124
125 ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
126 QCOM_SCM_CMD_HDCP);
127
128 qcom_scm_clk_disable();
129
130 return ret > 0 ? true : false;
131}
132EXPORT_SYMBOL(qcom_scm_hdcp_available);
133
134
135
136
137
138
139
140
141
142int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
143{
144 int ret = qcom_scm_clk_enable();
145
146 if (ret)
147 return ret;
148
149 ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
150 qcom_scm_clk_disable();
151 return ret;
152}
153EXPORT_SYMBOL(qcom_scm_hdcp_req);
154
155
156
157
158
159
160
161
162bool qcom_scm_pas_supported(u32 peripheral)
163{
164 int ret;
165
166 ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
167 QCOM_SCM_PAS_IS_SUPPORTED_CMD);
168 if (ret <= 0)
169 return false;
170
171 return __qcom_scm_pas_supported(__scm->dev, peripheral);
172}
173EXPORT_SYMBOL(qcom_scm_pas_supported);
174
175
176
177
178
179
180
181
182
183
184
185
186
/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication with
 *			       firmware metadata
 * @peripheral: peripheral id
 * @metadata: pointer to the image metadata blob
 * @size: size of the metadata blob in bytes
 *
 * Copies the metadata into a DMA-coherent bounce buffer so the secure world
 * can read it by physical address, then issues the PAS init-image call with
 * the interface clocks enabled.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	/*
	 * The metadata is passed to the firmware by physical address, so it
	 * must live in a buffer with a known, DMA-able physical mapping;
	 * the caller's buffer offers no such guarantee.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

	qcom_scm_clk_disable();

free_metadata:
	/* The firmware consumes the metadata during the call; safe to free. */
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
220
221
222
223
224
225
226
227
228
229
230int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
231{
232 int ret;
233
234 ret = qcom_scm_clk_enable();
235 if (ret)
236 return ret;
237
238 ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
239 qcom_scm_clk_disable();
240
241 return ret;
242}
243EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
244
245
246
247
248
249
250
251
252int qcom_scm_pas_auth_and_reset(u32 peripheral)
253{
254 int ret;
255
256 ret = qcom_scm_clk_enable();
257 if (ret)
258 return ret;
259
260 ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
261 qcom_scm_clk_disable();
262
263 return ret;
264}
265EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
266
267
268
269
270
271
272
273int qcom_scm_pas_shutdown(u32 peripheral)
274{
275 int ret;
276
277 ret = qcom_scm_clk_enable();
278 if (ret)
279 return ret;
280
281 ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
282 qcom_scm_clk_disable();
283
284 return ret;
285}
286EXPORT_SYMBOL(qcom_scm_pas_shutdown);
287
288static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
289 unsigned long idx)
290{
291 if (idx != 0)
292 return -EINVAL;
293
294 return __qcom_scm_pas_mss_reset(__scm->dev, 1);
295}
296
297static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
298 unsigned long idx)
299{
300 if (idx != 0)
301 return -EINVAL;
302
303 return __qcom_scm_pas_mss_reset(__scm->dev, 0);
304}
305
/* Reset controller ops backing the single MSS reset line. */
static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
310
311
312
313
314bool qcom_scm_is_available(void)
315{
316 return !!__scm;
317}
318EXPORT_SYMBOL(qcom_scm_is_available);
319
320static int qcom_scm_probe(struct platform_device *pdev)
321{
322 struct qcom_scm *scm;
323 int ret;
324
325 scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
326 if (!scm)
327 return -ENOMEM;
328
329 scm->core_clk = devm_clk_get(&pdev->dev, "core");
330 if (IS_ERR(scm->core_clk)) {
331 if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
332 return PTR_ERR(scm->core_clk);
333
334 scm->core_clk = NULL;
335 }
336
337 if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) {
338 scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
339 if (IS_ERR(scm->iface_clk)) {
340 if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
341 dev_err(&pdev->dev, "failed to acquire iface clk\n");
342 return PTR_ERR(scm->iface_clk);
343 }
344
345 scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
346 if (IS_ERR(scm->bus_clk)) {
347 if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
348 dev_err(&pdev->dev, "failed to acquire bus clk\n");
349 return PTR_ERR(scm->bus_clk);
350 }
351 }
352
353 scm->reset.ops = &qcom_scm_pas_reset_ops;
354 scm->reset.nr_resets = 1;
355 scm->reset.of_node = pdev->dev.of_node;
356 reset_controller_register(&scm->reset);
357
358
359 ret = clk_set_rate(scm->core_clk, INT_MAX);
360 if (ret)
361 return ret;
362
363 __scm = scm;
364 __scm->dev = &pdev->dev;
365
366 __qcom_scm_init();
367
368 return 0;
369}
370
/* Devicetree compatibles handled by this driver; "qcom,scm" is the generic
 * fallback and the only one that requires iface/bus clocks (see probe). */
static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",},
	{ .compatible = "qcom,scm-msm8660",},
	{ .compatible = "qcom,scm-msm8960",},
	{ .compatible = "qcom,scm",},
	{}
};

MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
380
/* Platform driver glue; the device is created by qcom_scm_init() below. */
static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
};
388
/*
 * Early module init: the SCM node lives under /firmware, which the core OF
 * code does not populate automatically, so do it here before registering
 * the driver. Bails out with -ENODEV when no matching node exists.
 */
static int __init qcom_scm_init(void)
{
	struct device_node *np, *fw_np;
	int ret;

	fw_np = of_find_node_by_name(NULL, "firmware");

	if (!fw_np)
		return -ENODEV;

	/* Only populate when a supported SCM child node is actually present. */
	np = of_find_matching_node(fw_np, qcom_scm_dt_match);

	if (!np) {
		of_node_put(fw_np);
		return -ENODEV;
	}

	/* The match was only an existence check; drop the reference now. */
	of_node_put(np);

	ret = of_platform_populate(fw_np, qcom_scm_dt_match, NULL, NULL);

	of_node_put(fw_np);

	if (ret)
		return ret;

	return platform_driver_register(&qcom_scm_driver);
}

/* subsys_initcall: other subsystems (e.g. remoteproc) rely on SCM early. */
subsys_initcall(qcom_scm_init);
419
/* Module teardown: unregister the driver registered in qcom_scm_init(). */
static void __exit qcom_scm_exit(void)
{
	platform_driver_unregister(&qcom_scm_driver);
}
module_exit(qcom_scm_exit);
425
426MODULE_DESCRIPTION("Qualcomm SCM driver");
427MODULE_LICENSE("GPL v2");
428