linux/drivers/firmware/qcom_scm-32.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>

#include "qcom_scm.h"

#define QCOM_SCM_FLAG_COLDBOOT_CPU0     0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1     0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2     0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3     0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0     0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1     0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2     0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3     0x40
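/*
 * The boot flag values above come from the firmware interface; note that
 * they do not form a uniform per-CPU bit pattern.
 */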

struct qcom_scm_entry {
        int flag;
        void *entry;
};

static struct qcom_scm_entry qcom_scm_wb[] = {
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

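/* Serializes all non-atomic SCM calls into the secure world. */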
static DEFINE_MUTEX(qcom_scm_lock);

/**
 * struct qcom_scm_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from qcom_scm_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *      ------------------- <--- struct qcom_scm_command
 *      | command header  |
 *      ------------------- <--- qcom_scm_get_command_buffer()
 *      | command buffer  |
 *      ------------------- <--- struct qcom_scm_response and
 *      | response header |      qcom_scm_command_to_response()
 *      ------------------- <--- qcom_scm_get_response_buffer()
 *      | response buffer |
 *      -------------------
 *
 * There can be arbitrary padding between the headers and buffers, so
 * always use the appropriate qcom_scm_get_*_buffer() routines to access
 * the buffers safely.
 */
struct qcom_scm_command {
        __le32 len;
        __le32 buf_offset;
        __le32 resp_hdr_offset;
        __le32 id;
        __le32 buf[];
};

/**
 * struct qcom_scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of qcom_scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct qcom_scm_response {
        __le32 len;
        __le32 buf_offset;
        __le32 is_complete;
};

/**
 * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct qcom_scm_response *qcom_scm_command_to_response(
                const struct qcom_scm_command *cmd)
{
        return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}

/**
 * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
        return (void *)cmd->buf;
}

/**
 * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
        return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}

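/*
 * smc() - Issue a legacy SCM call and wait for completion.
 *
 * Per the legacy calling convention, r0 is set to 1, r1 points at a
 * context ID cookie and r2 carries the physical address of the command
 * buffer. The call is reissued for as long as the secure world reports
 * that it was interrupted.
 */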
static u32 smc(u32 cmd_addr)
{
        int context_id;
        register u32 r0 asm("r0") = 1;
        register u32 r1 asm("r1") = (u32)&context_id;
        register u32 r2 asm("r2") = cmd_addr;
        do {
                asm volatile(
                        __asmeq("%0", "r0")
                        __asmeq("%1", "r0")
                        __asmeq("%2", "r1")
                        __asmeq("%3", "r2")
#ifdef REQUIRES_SEC
                        ".arch_extension sec\n"
#endif
                        "smc    #0      @ switch to secure world\n"
                        : "=r" (r0)
                        : "r" (r0), "r" (r1), "r" (r2)
                        : "r3", "r12");
        } while (r0 == QCOM_SCM_INTERRUPTED);

        return r0;
}

/**
 * qcom_scm_call() - Send an SCM command
 * @dev: device used for DMA mapping of the command buffer
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Any buffer that is expected to be accessed by the secure world must be
 * flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
                         const void *cmd_buf, size_t cmd_len, void *resp_buf,
                         size_t resp_len)
{
        int ret;
        struct qcom_scm_command *cmd;
        struct qcom_scm_response *rsp;
        size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
        dma_addr_t cmd_phys;

        cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        cmd->len = cpu_to_le32(alloc_len);
        cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
        cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);

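        /*
         * Legacy command IDs pack the service identifier in the bits above
         * bit 9 and the command identifier in the low 10 bits.
         */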
        cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
        if (cmd_buf)
                memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

        rsp = qcom_scm_command_to_response(cmd);

        cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, cmd_phys)) {
                kfree(cmd);
                return -ENOMEM;
        }

        mutex_lock(&qcom_scm_lock);
        ret = smc(cmd_phys);
        if (ret < 0)
                ret = qcom_scm_remap_error(ret);
        mutex_unlock(&qcom_scm_lock);
        if (ret)
                goto out;

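        /*
         * The secure world signals completion by setting is_complete in the
         * shared response header; poll for it, re-syncing the header for the
         * CPU on each pass.
         */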
        do {
                dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
                                        sizeof(*rsp), DMA_FROM_DEVICE);
        } while (!rsp->is_complete);

        if (resp_buf) {
                dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
                                        le32_to_cpu(rsp->buf_offset),
                                        resp_len, DMA_FROM_DEVICE);
                memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
                       resp_len);
        }
out:
        dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
        kfree(cmd);
        return ret;
}

#define SCM_CLASS_REGISTER      (0x2 << 8)
#define SCM_MASK_IRQS           BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
                                SCM_CLASS_REGISTER | \
                                SCM_MASK_IRQS | \
                                ((n) & 0xf))
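/*
 * For illustration: SCM_ATOMIC(0x1, 0x2, 1) encodes to
 * (((0x1 << 10) | 0x2) << 12) | SCM_CLASS_REGISTER | SCM_MASK_IRQS | 1
 * = 0x402000 | 0x200 | 0x20 | 0x1 = 0x402221.
 */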

/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
        int context_id;

        register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
        register u32 r1 asm("r1") = (u32)&context_id;
        register u32 r2 asm("r2") = arg1;

        asm volatile(
                        __asmeq("%0", "r0")
                        __asmeq("%1", "r0")
                        __asmeq("%2", "r1")
                        __asmeq("%3", "r2")
#ifdef REQUIRES_SEC
                        ".arch_extension sec\n"
#endif
                        "smc    #0      @ switch to secure world\n"
                        : "=r" (r0)
                        : "r" (r0), "r" (r1), "r" (r2)
                        : "r3", "r12");
        return r0;
}

/**
 * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc:        service identifier
 * @cmd:        command identifier
 * @arg1:       first argument
 * @arg2:       second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
        int context_id;

        register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
        register u32 r1 asm("r1") = (u32)&context_id;
        register u32 r2 asm("r2") = arg1;
        register u32 r3 asm("r3") = arg2;

        asm volatile(
                        __asmeq("%0", "r0")
                        __asmeq("%1", "r0")
                        __asmeq("%2", "r1")
                        __asmeq("%3", "r2")
                        __asmeq("%4", "r3")
#ifdef REQUIRES_SEC
                        ".arch_extension sec\n"
#endif
                        "smc    #0      @ switch to secure world\n"
                        : "=r" (r0)
                        : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
                        : "r12");
        return r0;
}

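/*
 * qcom_scm_get_version() - Query the SCM interface version.
 *
 * The version is retrieved from the secure world once and cached in a
 * static variable, so subsequent calls return without issuing another SMC.
 */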
u32 qcom_scm_get_version(void)
{
        int context_id;
        static u32 version = -1;
        register u32 r0 asm("r0");
        register u32 r1 asm("r1");

        if (version != -1)
                return version;

        mutex_lock(&qcom_scm_lock);

        r0 = 0x1 << 8;
        r1 = (u32)&context_id;
        do {
                asm volatile(
                        __asmeq("%0", "r0")
                        __asmeq("%1", "r1")
                        __asmeq("%2", "r0")
                        __asmeq("%3", "r1")
#ifdef REQUIRES_SEC
                        ".arch_extension sec\n"
#endif
                        "smc    #0      @ switch to secure world\n"
                        : "=r" (r0), "=r" (r1)
                        : "r" (r0), "r" (r1)
                        : "r2", "r3", "r12");
        } while (r0 == QCOM_SCM_INTERRUPTED);

        version = r1;
        mutex_unlock(&qcom_scm_lock);

        return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);

/**
 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range will be removed from the cpu present mask.
 */
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
        int flags = 0;
        int cpu;
        int scm_cb_flags[] = {
                QCOM_SCM_FLAG_COLDBOOT_CPU0,
                QCOM_SCM_FLAG_COLDBOOT_CPU1,
                QCOM_SCM_FLAG_COLDBOOT_CPU2,
                QCOM_SCM_FLAG_COLDBOOT_CPU3,
        };

        if (!cpus || cpumask_empty(cpus))
                return -EINVAL;

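        /*
         * QCOM_SCM_FLAG_COLDBOOT_CPU0 is 0x00, so CPU0 contributes no bit to
         * the composed mask; e.g. a cpumask of CPUs 0 and 1 yields
         * flags == 0x01.
         */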
        for_each_cpu(cpu, cpus) {
                if (cpu < ARRAY_SIZE(scm_cb_flags))
                        flags |= scm_cb_flags[cpu];
                else
                        set_cpu_present(cpu, false);
        }

        return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
                                    flags, virt_to_phys(entry));
}

/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @dev: device used for the SCM call
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
                                  const cpumask_t *cpus)
{
        int ret;
        int flags = 0;
        int cpu;
        struct {
                __le32 flags;
                __le32 addr;
        } cmd;

        /*
         * Reassign only if we are switching from hotplug entry point
         * to cpuidle entry point or vice versa.
         */
        for_each_cpu(cpu, cpus) {
                if (entry == qcom_scm_wb[cpu].entry)
                        continue;
                flags |= qcom_scm_wb[cpu].flag;
        }

        /* No change in entry function */
        if (!flags)
                return 0;

        cmd.addr = cpu_to_le32(virt_to_phys(entry));
        cmd.flags = cpu_to_le32(flags);
        ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
                            &cmd, sizeof(cmd), NULL, 0);
        if (!ret) {
                for_each_cpu(cpu, cpus)
                        qcom_scm_wb[cpu].entry = entry;
        }

        return ret;
}

/**
 * __qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down a cpu. If there was a pending interrupt,
 * control returns from this function; otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void __qcom_scm_cpu_power_down(u32 flags)
{
        qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
                        flags & QCOM_SCM_FLUSH_FLAG_MASK);
}

int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
        int ret;
        __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
        __le32 ret_val = 0;

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
                            &svc_cmd, sizeof(svc_cmd), &ret_val,
                            sizeof(ret_val));
        if (ret)
                return ret;

        return le32_to_cpu(ret_val);
}

int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
                        u32 req_cnt, u32 *resp)
{
        if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
                return -ERANGE;

        return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
                req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}

void __qcom_scm_init(void)
{
}

bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
{
        __le32 out;
        __le32 in;
        int ret;

        in = cpu_to_le32(peripheral);
        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_IS_SUPPORTED_CMD,
                            &in, sizeof(in),
                            &out, sizeof(out));

        return ret ? false : !!out;
}

int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
                              dma_addr_t metadata_phys)
{
        __le32 scm_ret;
        int ret;
        struct {
                __le32 proc;
                __le32 image_addr;
        } request;

        request.proc = cpu_to_le32(peripheral);
        request.image_addr = cpu_to_le32(metadata_phys);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_INIT_IMAGE_CMD,
                            &request, sizeof(request),
                            &scm_ret, sizeof(scm_ret));

        return ret ? : le32_to_cpu(scm_ret);
}

int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
                             phys_addr_t addr, phys_addr_t size)
{
        __le32 scm_ret;
        int ret;
        struct {
                __le32 proc;
                __le32 addr;
                __le32 len;
        } request;

        request.proc = cpu_to_le32(peripheral);
        request.addr = cpu_to_le32(addr);
        request.len = cpu_to_le32(size);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_MEM_SETUP_CMD,
                            &request, sizeof(request),
                            &scm_ret, sizeof(scm_ret));

        return ret ? : le32_to_cpu(scm_ret);
}

int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
        __le32 out;
        __le32 in;
        int ret;

        in = cpu_to_le32(peripheral);
        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
                            &in, sizeof(in),
                            &out, sizeof(out));

        return ret ? : le32_to_cpu(out);
}

int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
        __le32 out;
        __le32 in;
        int ret;

        in = cpu_to_le32(peripheral);
        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_SHUTDOWN_CMD,
                            &in, sizeof(in),
                            &out, sizeof(out));

        return ret ? : le32_to_cpu(out);
}

int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
        __le32 out;
        __le32 in = cpu_to_le32(reset);
        int ret;

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
                        &in, sizeof(in),
                        &out, sizeof(out));

        return ret ? : le32_to_cpu(out);
}

int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
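        /*
         * Note the legacy interface takes the QCOM_SCM_SET_DLOAD_MODE magic
         * itself as the first argument when enabling download mode.
         */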
        return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
                                     enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
}

int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
        struct {
                __le32 state;
                __le32 id;
        } req;
        __le32 scm_ret = 0;
        int ret;

        req.state = cpu_to_le32(state);
        req.id = cpu_to_le32(id);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
                            &req, sizeof(req), &scm_ret, sizeof(scm_ret));

        return ret ? : le32_to_cpu(scm_ret);
}

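/*
 * The following operations are not available through the legacy 32-bit
 * calling convention; they are only implemented for the newer SMC-based
 * convention, so they return -ENODEV here.
 */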
int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
                          size_t mem_sz, phys_addr_t src, size_t src_sz,
                          phys_addr_t dest, size_t dest_sz)
{
        return -ENODEV;
}

int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
                               u32 spare)
{
        return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
                                      size_t *size)
{
        return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
                                      u32 spare)
{
        return -ENODEV;
}

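/*
 * Read a hardware register via the secure monitor. Usage sketch (the
 * register address below is hypothetical):
 *
 *      unsigned int val;
 *      int ret = __qcom_scm_io_readl(dev, 0xfc4b8000, &val);
 */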
int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
                        unsigned int *val)
{
        int ret;

        ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
        if (ret >= 0)
                *val = ret;

        return ret < 0 ? ret : 0;
}

int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
        return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
                                     addr, val);
}