linux/drivers/firmware/qcom_scm-32.c
/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>

#include "qcom_scm.h"

#define QCOM_SCM_FLAG_COLDBOOT_CPU0     0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1     0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2     0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3     0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0     0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1     0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2     0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3     0x40

struct qcom_scm_entry {
        int flag;
        void *entry;
};

static struct qcom_scm_entry qcom_scm_wb[] = {
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static DEFINE_MUTEX(qcom_scm_lock);

/**
 * struct qcom_scm_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from qcom_scm_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *      ------------------- <--- struct qcom_scm_command
 *      | command header  |
 *      ------------------- <--- qcom_scm_get_command_buffer()
 *      | command buffer  |
 *      ------------------- <--- struct qcom_scm_response and
 *      | response header |      qcom_scm_command_to_response()
 *      ------------------- <--- qcom_scm_get_response_buffer()
 *      | response buffer |
 *      -------------------
 *
 * There can be arbitrary padding between the headers and buffers, so
 * always use the qcom_scm_get_*_buffer() routines to access the buffers
 * safely.
 */
struct qcom_scm_command {
        __le32 len;
        __le32 buf_offset;
        __le32 resp_hdr_offset;
        __le32 id;
        __le32 buf[];
};
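
/*
 * Illustration (derived from how qcom_scm_call() below fills these fields,
 * with made-up sizes): for an 8-byte command payload and a 4-byte response,
 *
 *      cmd->len             = sizeof(*cmd) + 8 +
 *                             sizeof(struct qcom_scm_response) + 4
 *      cmd->buf_offset      = sizeof(*cmd)
 *      cmd->resp_hdr_offset = sizeof(*cmd) + 8
 *
 * so the command payload starts at cmd->buf and the response header sits
 * resp_hdr_offset bytes past the command header.
 */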

/**
 * struct qcom_scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of qcom_scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct qcom_scm_response {
        __le32 len;
        __le32 buf_offset;
        __le32 is_complete;
};

/**
 * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct qcom_scm_response *qcom_scm_command_to_response(
                const struct qcom_scm_command *cmd)
{
        return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}

/**
 * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
        return (void *)cmd->buf;
}

/**
 * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
        return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}

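/*
 * Issue a legacy SCM call via the SMC instruction. As set up below, r0
 * carries the call identifier (1 for this command-buffer interface), r1
 * points at a scratch context-ID word and r2 holds the physical address
 * of the command buffer. The call is retried for as long as the secure
 * world returns QCOM_SCM_INTERRUPTED.
 */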
static u32 smc(u32 cmd_addr)
{
        int context_id;
        register u32 r0 asm("r0") = 1;
        register u32 r1 asm("r1") = (u32)&context_id;
        register u32 r2 asm("r2") = cmd_addr;
        do {
                asm volatile(
                        __asmeq("%0", "r0")
                        __asmeq("%1", "r0")
                        __asmeq("%2", "r1")
                        __asmeq("%3", "r2")
#ifdef REQUIRES_SEC
                        ".arch_extension sec\n"
#endif
                        "smc    #0      @ switch to secure world\n"
                        : "=r" (r0)
                        : "r" (r0), "r" (r1), "r" (r2)
                        : "r3", "r12");
        } while (r0 == QCOM_SCM_INTERRUPTED);

        return r0;
}

/**
 * qcom_scm_call() - Send an SCM command
 * @dev: device used for DMA mapping of the command buffer
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Any buffers that are expected to be accessed by the secure world must be
 * flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers
 * are responsible for any other cached buffers passed over to the secure
 * world.
 */
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
                         const void *cmd_buf, size_t cmd_len, void *resp_buf,
                         size_t resp_len)
{
        int ret;
        struct qcom_scm_command *cmd;
        struct qcom_scm_response *rsp;
        size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
        dma_addr_t cmd_phys;

        cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        cmd->len = cpu_to_le32(alloc_len);
        cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
        cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);

        cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
        if (cmd_buf)
                memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

        rsp = qcom_scm_command_to_response(cmd);

        cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, cmd_phys)) {
                kfree(cmd);
                return -ENOMEM;
        }

        mutex_lock(&qcom_scm_lock);
        ret = smc(cmd_phys);
        if (ret < 0)
                ret = qcom_scm_remap_error(ret);
        mutex_unlock(&qcom_scm_lock);
        if (ret)
                goto out;

        do {
                dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
                                        sizeof(*rsp), DMA_FROM_DEVICE);
        } while (!rsp->is_complete);

        if (resp_buf) {
                dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
                                        le32_to_cpu(rsp->buf_offset),
                                        resp_len, DMA_FROM_DEVICE);
                memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
                       resp_len);
        }
out:
        dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
        kfree(cmd);
        return ret;
}
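
/*
 * Illustrative use of qcom_scm_call() (a sketch mirroring the PAS helpers
 * further down, not additional driver code): marshal little-endian
 * parameters into a request struct and read back a little-endian result.
 *
 *      struct { __le32 proc; __le32 state; } req = {
 *              .proc  = cpu_to_le32(peripheral),
 *              .state = cpu_to_le32(state),
 *      };
 *      __le32 scm_ret;
 *      int ret;
 *
 *      ret = qcom_scm_call(dev, svc_id, cmd_id, &req, sizeof(req),
 *                          &scm_ret, sizeof(scm_ret));
 *      if (!ret)
 *              ret = le32_to_cpu(scm_ret);
 */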

#define SCM_CLASS_REGISTER      (0x2 << 8)
#define SCM_MASK_IRQS           BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
                                SCM_CLASS_REGISTER | \
                                SCM_MASK_IRQS | \
                                ((n) & 0xf))
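
/*
 * For example (purely illustrative values), SCM_ATOMIC(1, 2, 1) encodes to
 *
 *      ((1 << 10 | 2) << 12) | SCM_CLASS_REGISTER | SCM_MASK_IRQS | 1
 *              = 0x402000 | 0x200 | 0x20 | 0x1 = 0x402221
 *
 * i.e. service and command in the upper bits, the register-call class and
 * masked-IRQs flags, and the argument count in the low nibble.
 */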

/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
        int context_id;

        register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
        register u32 r1 asm("r1") = (u32)&context_id;
        register u32 r2 asm("r2") = arg1;

        asm volatile(
                        __asmeq("%0", "r0")
                        __asmeq("%1", "r0")
                        __asmeq("%2", "r1")
                        __asmeq("%3", "r2")
#ifdef REQUIRES_SEC
                        ".arch_extension sec\n"
#endif
                        "smc    #0      @ switch to secure world\n"
                        : "=r" (r0)
                        : "r" (r0), "r" (r1), "r" (r2)
                        : "r3", "r12");
        return r0;
}

/**
 * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc:        service identifier
 * @cmd:        command identifier
 * @arg1:       first argument
 * @arg2:       second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
        int context_id;

        register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
        register u32 r1 asm("r1") = (u32)&context_id;
        register u32 r2 asm("r2") = arg1;
        register u32 r3 asm("r3") = arg2;

        asm volatile(
                        __asmeq("%0", "r0")
                        __asmeq("%1", "r0")
                        __asmeq("%2", "r1")
                        __asmeq("%3", "r2")
                        __asmeq("%4", "r3")
#ifdef REQUIRES_SEC
                        ".arch_extension sec\n"
#endif
                        "smc    #0      @ switch to secure world\n"
                        : "=r" (r0)
                        : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
                        : "r12");
        return r0;
}
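
/*
 * Register convention for the atomic calls above, as set up by the code:
 * r0 carries the SCM_ATOMIC()-encoded identifier, r1 the context-ID
 * pointer, and r2/r3 the arguments. For instance, __qcom_scm_io_writel()
 * below ends up issuing
 *
 *      r0 = SCM_ATOMIC(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, 2);
 *      r2 = addr;
 *      r3 = val;
 *
 * and the secure world's return value comes back in r0.
 */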

u32 qcom_scm_get_version(void)
{
        int context_id;
        static u32 version = -1;
        register u32 r0 asm("r0");
        register u32 r1 asm("r1");

        if (version != -1)
                return version;

        mutex_lock(&qcom_scm_lock);

        r0 = 0x1 << 8;
        r1 = (u32)&context_id;
        do {
                asm volatile(
                        __asmeq("%0", "r0")
                        __asmeq("%1", "r1")
                        __asmeq("%2", "r0")
                        __asmeq("%3", "r1")
#ifdef REQUIRES_SEC
                        ".arch_extension sec\n"
#endif
                        "smc    #0      @ switch to secure world\n"
                        : "=r" (r0), "=r" (r1)
                        : "r" (r0), "r" (r1)
                        : "r2", "r3", "r12");
        } while (r0 == QCOM_SCM_INTERRUPTED);

        version = r1;
        mutex_unlock(&qcom_scm_lock);

        return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);

/**
 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range will be removed from the cpu present mask.
 */
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
        int flags = 0;
        int cpu;
        int scm_cb_flags[] = {
                QCOM_SCM_FLAG_COLDBOOT_CPU0,
                QCOM_SCM_FLAG_COLDBOOT_CPU1,
                QCOM_SCM_FLAG_COLDBOOT_CPU2,
                QCOM_SCM_FLAG_COLDBOOT_CPU3,
        };

        if (!cpus || cpumask_empty(cpus))
                return -EINVAL;

        for_each_cpu(cpu, cpus) {
                if (cpu < ARRAY_SIZE(scm_cb_flags))
                        flags |= scm_cb_flags[cpu];
                else
                        set_cpu_present(cpu, false);
        }

        return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
                                    flags, virt_to_phys(entry));
}
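
/*
 * For example, a cpumask covering CPUs 0 and 1 yields
 * flags = QCOM_SCM_FLAG_COLDBOOT_CPU0 | QCOM_SCM_FLAG_COLDBOOT_CPU1 = 0x01;
 * note that CPU0's coldboot flag is 0x00, so it contributes no bits of its
 * own to the mask.
 */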

/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @dev: device making the SCM call
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
                                  const cpumask_t *cpus)
{
        int ret;
        int flags = 0;
        int cpu;
        struct {
                __le32 flags;
                __le32 addr;
        } cmd;

        /*
         * Reassign only if we are switching from hotplug entry point
         * to cpuidle entry point or vice versa.
         */
        for_each_cpu(cpu, cpus) {
                if (entry == qcom_scm_wb[cpu].entry)
                        continue;
                flags |= qcom_scm_wb[cpu].flag;
        }

        /* No change in entry function */
        if (!flags)
                return 0;

        cmd.addr = cpu_to_le32(virt_to_phys(entry));
        cmd.flags = cpu_to_le32(flags);
        ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
                            &cmd, sizeof(cmd), NULL, 0);
        if (!ret) {
                for_each_cpu(cpu, cpus)
                        qcom_scm_wb[cpu].entry = entry;
        }

        return ret;
}
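
/*
 * Illustration: switching CPUs 0 and 1 to a new warm-boot entry sends an
 * 8-byte payload of
 *
 *      cmd.flags = cpu_to_le32(QCOM_SCM_FLAG_WARMBOOT_CPU0 |
 *                              QCOM_SCM_FLAG_WARMBOOT_CPU1);  (0x04 | 0x02)
 *      cmd.addr  = cpu_to_le32(virt_to_phys(entry));
 *
 * and the cached qcom_scm_wb[] entries are updated only on success.
 */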

/**
 * __qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down the cpu. If there was a pending
 * interrupt, control will return from this function; otherwise, the cpu
 * jumps to the warm boot entry point set for this cpu upon reset.
 */
void __qcom_scm_cpu_power_down(u32 flags)
{
        qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
                        flags & QCOM_SCM_FLUSH_FLAG_MASK);
}

int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
        int ret;
        __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
        __le32 ret_val = 0;

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
                            &svc_cmd, sizeof(svc_cmd), &ret_val,
                            sizeof(ret_val));
        if (ret)
                return ret;

        return le32_to_cpu(ret_val);
}
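
/*
 * Caller sketch (illustrative only; the public wrappers live in the common
 * SCM code): probe whether the HDCP command is implemented before using it.
 *
 *      int avail = __qcom_scm_is_call_available(dev, QCOM_SCM_SVC_HDCP,
 *                                               QCOM_SCM_CMD_HDCP);
 *      if (avail < 0)
 *              return avail;   (negative means the query itself failed)
 *      if (!avail)
 *              return -ENODEV; (zero means the command is unimplemented)
 */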

int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
                        u32 req_cnt, u32 *resp)
{
        if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
                return -ERANGE;

        return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
                req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}

void __qcom_scm_init(void)
{
}

bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
{
        __le32 out;
        __le32 in;
        int ret;

        in = cpu_to_le32(peripheral);
        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_IS_SUPPORTED_CMD,
                            &in, sizeof(in),
                            &out, sizeof(out));

        return ret ? false : !!out;
}

int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
                              dma_addr_t metadata_phys)
{
        __le32 scm_ret;
        int ret;
        struct {
                __le32 proc;
                __le32 image_addr;
        } request;

        request.proc = cpu_to_le32(peripheral);
        request.image_addr = cpu_to_le32(metadata_phys);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_INIT_IMAGE_CMD,
                            &request, sizeof(request),
                            &scm_ret, sizeof(scm_ret));

        return ret ? : le32_to_cpu(scm_ret);
}

int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
                             phys_addr_t addr, phys_addr_t size)
{
        __le32 scm_ret;
        int ret;
        struct {
                __le32 proc;
                __le32 addr;
                __le32 len;
        } request;

        request.proc = cpu_to_le32(peripheral);
        request.addr = cpu_to_le32(addr);
        request.len = cpu_to_le32(size);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_MEM_SETUP_CMD,
                            &request, sizeof(request),
                            &scm_ret, sizeof(scm_ret));

        return ret ? : le32_to_cpu(scm_ret);
}

int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
        __le32 out;
        __le32 in;
        int ret;

        in = cpu_to_le32(peripheral);
        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
                            &in, sizeof(in),
                            &out, sizeof(out));

        return ret ? : le32_to_cpu(out);
}

int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
        __le32 out;
        __le32 in;
        int ret;

        in = cpu_to_le32(peripheral);
        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_SHUTDOWN_CMD,
                            &in, sizeof(in),
                            &out, sizeof(out));

        return ret ? : le32_to_cpu(out);
}

int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
        __le32 out;
        __le32 in = cpu_to_le32(reset);
        int ret;

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
                        &in, sizeof(in),
                        &out, sizeof(out));

        return ret ? : le32_to_cpu(out);
}

int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
        return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
                                     enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
}

int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
        struct {
                __le32 state;
                __le32 id;
        } req;
        __le32 scm_ret = 0;
        int ret;

        req.state = cpu_to_le32(state);
        req.id = cpu_to_le32(id);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
                            &req, sizeof(req), &scm_ret, sizeof(scm_ret));

        return ret ? : le32_to_cpu(scm_ret);
}

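/*
 * The next four services (memory assignment, secure-config restore and
 * secure pagetable setup) are not implemented in this legacy calling
 * convention; they return -ENODEV so callers can detect the missing
 * support.
 */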
int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
                          size_t mem_sz, phys_addr_t src, size_t src_sz,
                          phys_addr_t dest, size_t dest_sz)
{
        return -ENODEV;
}

int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
                               u32 spare)
{
        return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
                                      size_t *size)
{
        return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
                                      u32 spare)
{
        return -ENODEV;
}

int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
                        unsigned int *val)
{
        int ret;

        ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
        if (ret >= 0)
                *val = ret;

        return ret < 0 ? ret : 0;
}

int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
        return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
                                     addr, val);
}
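
/*
 * Usage sketch for the secure IO accessors (illustrative, with a made-up
 * register address):
 *
 *      unsigned int val;
 *      int ret;
 *
 *      ret = __qcom_scm_io_readl(dev, 0xfc4b8000, &val);
 *      if (!ret)
 *              ret = __qcom_scm_io_writel(dev, 0xfc4b8000, val | BIT(0));
 *
 * Note that __qcom_scm_io_readl() receives the value through the s32
 * return of qcom_scm_call_atomic1(), so register contents with the top
 * bit set are indistinguishable from errors in this legacy convention.
 */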