/* linux/arch/arm/mach-qcom/scm.c - Qualcomm Secure Channel Manager (SCM) interface */
   1/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
   2 *
   3 * This program is free software; you can redistribute it and/or modify
   4 * it under the terms of the GNU General Public License version 2 and
   5 * only version 2 as published by the Free Software Foundation.
   6 *
   7 * This program is distributed in the hope that it will be useful,
   8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
   9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  10 * GNU General Public License for more details.
  11 *
  12 * You should have received a copy of the GNU General Public License
  13 * along with this program; if not, write to the Free Software
  14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  15 * 02110-1301, USA.
  16 */
  17
  18#include <linux/slab.h>
  19#include <linux/io.h>
  20#include <linux/module.h>
  21#include <linux/mutex.h>
  22#include <linux/errno.h>
  23#include <linux/err.h>
  24
  25#include <asm/outercache.h>
  26#include <asm/cacheflush.h>
  27
  28#include "scm.h"
  29
/*
 * Error codes returned by the secure world in r0; remapped onto
 * standard Linux errnos by scm_remap_error().
 */
#define SCM_ENOMEM		-5
#define SCM_EOPNOTSUPP		-4
#define SCM_EINVAL_ADDR		-3
#define SCM_EINVAL_ARG		-2
#define SCM_ERROR		-1
#define SCM_INTERRUPTED		1	/* call was preempted; caller must retry the smc */

/* Serializes all entries into the secure world (scm_call/scm_get_version). */
static DEFINE_MUTEX(scm_lock);
  38
  39/**
  40 * struct scm_command - one SCM command buffer
  41 * @len: total available memory for command and response
  42 * @buf_offset: start of command buffer
  43 * @resp_hdr_offset: start of response buffer
  44 * @id: command to be executed
  45 * @buf: buffer returned from scm_get_command_buffer()
  46 *
  47 * An SCM command is laid out in memory as follows:
  48 *
  49 *      ------------------- <--- struct scm_command
  50 *      | command header  |
  51 *      ------------------- <--- scm_get_command_buffer()
  52 *      | command buffer  |
  53 *      ------------------- <--- struct scm_response and
  54 *      | response header |      scm_command_to_response()
  55 *      ------------------- <--- scm_get_response_buffer()
  56 *      | response buffer |
  57 *      -------------------
  58 *
  59 * There can be arbitrary padding between the headers and buffers so
  60 * you should always use the appropriate scm_get_*_buffer() routines
  61 * to access the buffers in a safe manner.
  62 */
  63struct scm_command {
  64        __le32 len;
  65        __le32 buf_offset;
  66        __le32 resp_hdr_offset;
  67        __le32 id;
  68        __le32 buf[0];
  69};
  70
/**
 * struct scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of scm_response
 * @is_complete: indicates if the command has finished processing
 *
 * Written by the secure world; fields are little-endian. @is_complete
 * is polled (with cache invalidation) by scm_call() to detect when the
 * response is ready.
 */
struct scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};
  82
  83/**
  84 * alloc_scm_command() - Allocate an SCM command
  85 * @cmd_size: size of the command buffer
  86 * @resp_size: size of the response buffer
  87 *
  88 * Allocate an SCM command, including enough room for the command
  89 * and response headers as well as the command and response buffers.
  90 *
  91 * Returns a valid &scm_command on success or %NULL if the allocation fails.
  92 */
  93static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
  94{
  95        struct scm_command *cmd;
  96        size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
  97                resp_size;
  98        u32 offset;
  99
 100        cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
 101        if (cmd) {
 102                cmd->len = cpu_to_le32(len);
 103                offset = offsetof(struct scm_command, buf);
 104                cmd->buf_offset = cpu_to_le32(offset);
 105                cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
 106        }
 107        return cmd;
 108}
 109
/**
 * free_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Releases a buffer obtained from alloc_scm_command(). Passing %NULL
 * is harmless since kfree() ignores it.
 */
static inline void free_scm_command(struct scm_command *cmd)
{
	kfree(cmd);
}
 120
/**
 * scm_command_to_response() - Get a pointer to a scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 *
 * Note: relies on the GCC extension allowing arithmetic on void *
 * (treated as byte arithmetic), which is standard kernel practice.
 */
static inline struct scm_response *scm_command_to_response(
		const struct scm_command *cmd)
{
	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}
 132
/**
 * scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command, i.e. the
 * flexible array immediately after the command header.
 */
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
	return (void *)cmd->buf;
}
 143
/**
 * scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response, located
 * @buf_offset bytes past the start of the response header (the offset
 * is little-endian, written by the secure world).
 */
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
 154
 155static int scm_remap_error(int err)
 156{
 157        pr_err("scm_call failed with error code %d\n", err);
 158        switch (err) {
 159        case SCM_ERROR:
 160                return -EIO;
 161        case SCM_EINVAL_ADDR:
 162        case SCM_EINVAL_ARG:
 163                return -EINVAL;
 164        case SCM_EOPNOTSUPP:
 165                return -EOPNOTSUPP;
 166        case SCM_ENOMEM:
 167                return -ENOMEM;
 168        }
 169        return -EINVAL;
 170}
 171
/**
 * smc() - Issue a secure monitor call for one SCM command
 * @cmd_addr: physical address of the scm_command buffer
 *
 * Enters the secure world with r0 = 1 (presumably the "execute
 * command" function id of this firmware interface -- TODO confirm
 * against the SCM spec), r1 pointing at a scratch context id, and
 * r2 holding the command's physical address. The smc is reissued for
 * as long as the secure world returns SCM_INTERRUPTED, i.e. the call
 * was preempted before completion.
 *
 * Returns whatever the secure world leaves in r0; callers treat
 * negative values as SCM_* error codes (see scm_remap_error()).
 */
static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc    #0      @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3");
	} while (r0 == SCM_INTERRUPTED);

	return r0;
}
 195
 196static int __scm_call(const struct scm_command *cmd)
 197{
 198        int ret;
 199        u32 cmd_addr = virt_to_phys(cmd);
 200
 201        /*
 202         * Flush the command buffer so that the secure world sees
 203         * the correct data.
 204         */
 205        __cpuc_flush_dcache_area((void *)cmd, cmd->len);
 206        outer_flush_range(cmd_addr, cmd_addr + cmd->len);
 207
 208        ret = smc(cmd_addr);
 209        if (ret < 0)
 210                ret = scm_remap_error(ret);
 211
 212        return ret;
 213}
 214
/**
 * scm_inv_range() - Invalidate a virtual address range from the caches
 * @start: start address (rounded down to a cache line)
 * @end: end address, exclusive (rounded up to a cache line)
 *
 * Reads the cache line size from CTR (bits [19:16] encode log2 of the
 * minimum D-cache line size in words, hence 4 << x bytes), rounds the
 * range out to whole lines, invalidates the outer cache first, then
 * invalidates each inner line by MVA (DCIMVAC: c7, c6, 1). Finishes
 * with dsb/isb so the invalidation completes before the caller reads
 * the freshly-written response data.
 */
static void scm_inv_range(unsigned long start, unsigned long end)
{
	u32 cacheline_size, ctr;

	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
	cacheline_size = 4 << ((ctr >> 16) & 0xf);

	start = round_down(start, cacheline_size);
	end = round_up(end, cacheline_size);
	outer_inv_range(start, end);
	while (start < end) {
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
		     : "memory");
		start += cacheline_size;
	}
	dsb();
	isb();
}
 233
/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer (may be %NULL if @cmd_len is 0)
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer (may be %NULL if the response is ignored)
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * Returns 0 on success, -ENOMEM if the bounce buffer cannot be
 * allocated, or a negative errno remapped from the secure world.
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking scm_call and invalidated in the cache
 * immediately after scm_call returns. Cache maintenance on the command and
 * response buffers is taken care of by scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
		void *resp_buf, size_t resp_len)
{
	int ret;
	struct scm_command *cmd;
	struct scm_response *rsp;
	unsigned long start, end;

	cmd = alloc_scm_command(cmd_len, resp_len);
	if (!cmd)
		return -ENOMEM;

	/* Command id word: service in bits [10+], command in the low bits. */
	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	/* Only one CPU may be inside the secure world at a time. */
	mutex_lock(&scm_lock);
	ret = __scm_call(cmd);
	mutex_unlock(&scm_lock);
	if (ret)
		goto out;

	rsp = scm_command_to_response(cmd);
	start = (unsigned long)rsp;

	/*
	 * Poll until the secure world marks the response complete,
	 * invalidating the header each pass so we see its writes.
	 * NOTE(review): there is no timeout here -- firmware that never
	 * sets is_complete would spin this loop forever.
	 */
	do {
		scm_inv_range(start, start + sizeof(*rsp));
	} while (!rsp->is_complete);

	/* Now invalidate header + payload and copy the payload out. */
	end = (unsigned long)scm_get_response_buffer(rsp) + resp_len;
	scm_inv_range(start, end);

	if (resp_buf)
		memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
out:
	free_scm_command(cmd);
	return ret;
}
EXPORT_SYMBOL(scm_call);
 291
/**
 * scm_get_version() - Query the SCM interface version from firmware
 *
 * Issues a secure monitor call with r0 = 0x1 << 8 (presumably the
 * "get version" function id of this firmware interface -- TODO confirm)
 * and r1 pointing at a scratch context id; the version is returned in
 * r1. Retries while the secure world reports SCM_INTERRUPTED.
 *
 * The result is cached in a static after the first call, using
 * 0xffffffff as the "not yet fetched" sentinel -- this assumes the
 * firmware never legitimately reports that value.
 * NOTE(review): the cached value is read outside scm_lock; confirm
 * that this unsynchronized fast-path read is acceptable here.
 *
 * Returns the firmware-reported version word.
 */
u32 scm_get_version(void)
{
	int context_id;
	static u32 version = -1;
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	if (version != -1)
		return version;

	mutex_lock(&scm_lock);

	r0 = 0x1 << 8;
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc    #0      @ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3");
	} while (r0 == SCM_INTERRUPTED);

	version = r1;
	mutex_unlock(&scm_lock);

	return version;
}
EXPORT_SYMBOL(scm_get_version);
 327