linux/drivers/remoteproc/ti_k3_r5_remoteproc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 R5F (MCU) Remote Processor driver
 *
 * Copyright (C) 2017-2020 Texas Instruments Incorporated - https://www.ti.com/
 *      Suman Anna <s-anna@ti.com>
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"

/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR      0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN                    0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN                 0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP                  0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT                    0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN                   0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE               0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN                   0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN                   0x00002000
/* Available from J7200 SoCs onwards */
#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS              0x00004000
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE               0x00008000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT                0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE                    0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI                    0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED              0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED     0x00000100
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY        0x00000200

/**
 * struct k3_r5_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5_mem {
        void __iomem *cpu_addr;
        phys_addr_t bus_addr;
        u32 dev_addr;
        size_t size;
};

/*
 * Not all cluster mode values are applicable on all SoCs. The following
 * are the modes supported on various SoCs:
 *   Split mode      : AM65x, J721E, J7200 and AM64x SoCs
 *   LockStep mode   : AM65x, J721E and J7200 SoCs
 *   Single-CPU mode : AM64x SoCs only
 */
enum cluster_mode {
        CLUSTER_MODE_SPLIT = 0,
        CLUSTER_MODE_LOCKSTEP,
        CLUSTER_MODE_SINGLECPU,
};

/**
 * struct k3_r5_soc_data - match data to handle SoC variations
 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
 * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
 */
struct k3_r5_soc_data {
        bool tcm_is_double;
        bool tcm_ecc_autoinit;
        bool single_cpu_mode;
};

/**
 * struct k3_r5_cluster - K3 R5F Cluster structure
 * @dev: cached device pointer
 * @mode: Mode to configure the Cluster - Split, LockStep or Single-CPU
 * @cores: list of R5 cores within the cluster
 * @soc_data: SoC-specific feature data for an R5FSS
 */
struct k3_r5_cluster {
        struct device *dev;
        enum cluster_mode mode;
        struct list_head cores;
        const struct k3_r5_soc_data *soc_data;
};

/**
 * struct k3_r5_core - K3 R5 core structure
 * @elem: linked list item
 * @dev: cached device pointer
 * @rproc: rproc handle representing this core
 * @mem: internal memory regions data
 * @sram: on-chip SRAM memory regions data
 * @num_mems: number of internal memory regions
 * @num_sram: number of on-chip SRAM memory regions
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 */
struct k3_r5_core {
        struct list_head elem;
        struct device *dev;
        struct rproc *rproc;
        struct k3_r5_mem *mem;
        struct k3_r5_mem *sram;
        int num_mems;
        int num_sram;
        struct reset_control *reset;
        struct ti_sci_proc *tsp;
        const struct ti_sci_handle *ti_sci;
        u32 ti_sci_id;
        u32 atcm_enable;
        u32 btcm_enable;
        u32 loczrama;
};

/**
 * struct k3_r5_rproc - K3 remote processor state
 * @dev: cached device pointer
 * @cluster: cached pointer to parent cluster structure
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 * @rproc: rproc handle
 * @core: cached pointer to r5 core structure being used
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 */
struct k3_r5_rproc {
        struct device *dev;
        struct k3_r5_cluster *cluster;
        struct mbox_chan *mbox;
        struct mbox_client client;
        struct rproc *rproc;
        struct k3_r5_core *core;
        struct k3_r5_mem *rmem;
        int num_rmems;
};

/**
 * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the OMAP mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
{
        struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
                                                client);
        struct device *dev = kproc->rproc->dev.parent;
        const char *name = kproc->rproc->name;
        u32 msg = omap_mbox_message(data);

        dev_dbg(dev, "mbox msg: 0x%x\n", msg);

        switch (msg) {
        case RP_MBOX_CRASH:
                /*
                 * remoteproc detected an exception, but error recovery is not
                 * supported. So, just log this for now
                 */
                dev_err(dev, "K3 R5F rproc %s crashed\n", name);
                break;
        case RP_MBOX_ECHO_REPLY:
                dev_info(dev, "received echo reply from %s\n", name);
                break;
        default:
                /* silently handle all other valid messages */
                if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
                        return;
                if (msg > kproc->rproc->max_notifyid) {
                        dev_dbg(dev, "dropping unknown message 0x%x", msg);
                        return;
                }
                /* msg contains the index of the triggered vring */
                if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
                        dev_dbg(dev, "no message was found in vqid %d\n", msg);
        }
}

/* kick a virtqueue */
static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct device *dev = rproc->dev.parent;
        mbox_msg_t msg = (mbox_msg_t)vqid;
        int ret;

        /* send the index of the triggered virtqueue in the mailbox payload */
        ret = mbox_send_message(kproc->mbox, (void *)msg);
        if (ret < 0)
                dev_err(dev, "failed to send mailbox message, status = %d\n",
                        ret);
}

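/*
 * Reset a single core in Split mode: the core's local reset is asserted
 * first, followed by the module reset (TI-SCI device put). A failure on
 * the module reset is unwound by deasserting the local reset again.
 */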
static int k3_r5_split_reset(struct k3_r5_core *core)
{
        int ret;

        ret = reset_control_assert(core->reset);
        if (ret) {
                dev_err(core->dev, "local-reset assert failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                   core->ti_sci_id);
        if (ret) {
                dev_err(core->dev, "module-reset assert failed, ret = %d\n",
                        ret);
                if (reset_control_deassert(core->reset))
                        dev_warn(core->dev, "local-reset deassert back failed\n");
        }

        return ret;
}

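/*
 * Release a single core in Split mode: the module reset is deasserted
 * first (TI-SCI device get), followed by the core's local reset. A
 * failure on the local reset is unwound by asserting the module reset
 * back.
 */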
static int k3_r5_split_release(struct k3_r5_core *core)
{
        int ret;

        ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
                                                   core->ti_sci_id);
        if (ret) {
                dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = reset_control_deassert(core->reset);
        if (ret) {
                dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
                        ret);
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }

        return ret;
}

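/*
 * Reset all cores in LockStep mode: the local resets are asserted first
 * on all the cores, followed by the module resets (PSC disable). A
 * failure at either stage is unwound in reverse order on the cores
 * already processed.
 */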
static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
        struct k3_r5_core *core;
        int ret;

        /* assert local reset on all applicable cores */
        list_for_each_entry(core, &cluster->cores, elem) {
                ret = reset_control_assert(core->reset);
                if (ret) {
                        dev_err(core->dev, "local-reset assert failed, ret = %d\n",
                                ret);
                        core = list_prev_entry(core, elem);
                        goto unroll_local_reset;
                }
        }

        /* disable PSC modules on all applicable cores */
        list_for_each_entry(core, &cluster->cores, elem) {
                ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                           core->ti_sci_id);
                if (ret) {
                        dev_err(core->dev, "module-reset assert failed, ret = %d\n",
                                ret);
                        goto unroll_module_reset;
                }
        }

        return 0;

unroll_module_reset:
        list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }
        core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                if (reset_control_deassert(core->reset))
                        dev_warn(core->dev, "local-reset deassert back failed\n");
        }

        return ret;
}

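/*
 * Release all cores in LockStep mode: the module resets are deasserted
 * first on all the cores in reverse list order (Core1 before Core0),
 * followed by the local resets. Failures are unwound on the cores
 * already processed.
 */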
static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
        struct k3_r5_core *core;
        int ret;

        /* enable PSC modules on all applicable cores */
        list_for_each_entry_reverse(core, &cluster->cores, elem) {
                ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
                                                           core->ti_sci_id);
                if (ret) {
                        dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
                                ret);
                        core = list_next_entry(core, elem);
                        goto unroll_module_reset;
                }
        }

        /* deassert local reset on all applicable cores */
        list_for_each_entry_reverse(core, &cluster->cores, elem) {
                ret = reset_control_deassert(core->reset);
                if (ret) {
                        dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
                                ret);
                        goto unroll_local_reset;
                }
        }

        return 0;

unroll_local_reset:
        list_for_each_entry_continue(core, &cluster->cores, elem) {
                if (reset_control_assert(core->reset))
                        dev_warn(core->dev, "local-reset assert back failed\n");
        }
        core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
        list_for_each_entry_from(core, &cluster->cores, elem) {
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }

        return ret;
}

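/*
 * Helpers to halt or run a single R5F core by setting or clearing the
 * CORE_HALT bit through the TI-SCI processor control interface.
 */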
static inline int k3_r5_core_halt(struct k3_r5_core *core)
{
        return ti_sci_proc_set_control(core->tsp,
                                       PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}

static inline int k3_r5_core_run(struct k3_r5_core *core)
{
        return ti_sci_proc_set_control(core->tsp,
                                       0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}

/*
 * The R5F cores have controls for both a reset and a halt/run. The code
 * execution from DDR requires the initial boot-strapping code to be run
 * from the internal TCMs. This function is used to release the resets on
 * applicable cores to allow loading into the TCMs. The .prepare() ops is
 * invoked by the remoteproc core before any firmware loading, and is
 * followed by the .start() ops after loading to actually let the R5 cores
 * run.
 *
 * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to
 * execute code, but combines the TCMs from both cores. The resets for both
 * cores need to be released to make this possible, as the TCMs are in general
 * private to each core. Only Core0 needs to be unhalted for running the
 * cluster in this mode. The function uses the same reset logic as LockStep
 * mode for this (though the behavior is agnostic of the reset release order).
 */
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        struct device *dev = kproc->dev;
        u32 ctrl = 0, cfg = 0, stat = 0;
        u64 boot_vec = 0;
        bool mem_init_dis;
        int ret;

        ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
        if (ret < 0)
                return ret;
        mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);

        /* Re-use LockStep-mode reset logic for Single-CPU mode */
        ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
               cluster->mode == CLUSTER_MODE_SINGLECPU) ?
                k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
        if (ret) {
                dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
                        ret);
                return ret;
        }

        /*
         * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
         * of TCMs, so there is no need to perform the s/w memzero. This bit is
         * configurable through System Firmware; the default value does perform
         * auto-init, but account for it in case it is disabled
         */
        if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) {
                dev_dbg(dev, "leveraging h/w init for TCM memories\n");
                return 0;
        }

        /*
         * Zero out both TCMs unconditionally (access from v8 Arm core is not
         * affected by ATCM & BTCM enable configuration values) so that ECC
         * can be effective on all TCM addresses.
         */
        dev_dbg(dev, "zeroing out ATCM memory\n");
        memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);

        dev_dbg(dev, "zeroing out BTCM memory\n");
        memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);

        return 0;
}

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to that of the .prepare() ops. The function is used to assert the
 * resets on all applicable cores for the rproc device (depending on LockStep
 * or Split mode). This completes the second portion of powering down the R5F
 * cores. The cores themselves are only halted in the .stop() ops, and the
 * .unprepare() ops is invoked by the remoteproc core after the remoteproc is
 * stopped.
 *
 * The Single-CPU mode on applicable SoCs (eg: AM64x) combines the TCMs from
 * both cores. The access is made possible only by releasing the resets for
 * both cores, but with only Core0 unhalted. This function re-uses the same
 * reset assert logic as LockStep mode for this mode (though the behavior is
 * agnostic of the reset assert order).
 */
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        struct device *dev = kproc->dev;
        int ret;

        /* Re-use LockStep-mode reset logic for Single-CPU mode */
        ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
               cluster->mode == CLUSTER_MODE_SINGLECPU) ?
                k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
        if (ret)
                dev_err(dev, "unable to disable cores, ret = %d\n", ret);

        return ret;
}

/*
 * The R5F start sequence includes two different operations
 * 1. Configure the boot vector for R5F core(s)
 * 2. Unhalt/Run the R5F core(s)
 *
 * The sequence is different between LockStep and Split modes. The LockStep
 * mode requires the boot vector to be configured only for Core0, and then
 * both cores to be unhalted to start the execution - Core1 needs to be
 * unhalted first, followed by Core0. The Split-mode requires that Core0 be
 * maintained always in a higher power state than Core1 (implying Core1 can
 * only be started after Core0 is started).
 *
 * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
 * code, so only Core0 needs to be unhalted. The function uses the same logic
 * flow as Split-mode for this.
 */
static int k3_r5_rproc_start(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct mbox_client *client = &kproc->client;
        struct device *dev = kproc->dev;
        struct k3_r5_core *core;
        u32 boot_addr;
        int ret;

        client->dev = dev;
        client->tx_done = NULL;
        client->rx_callback = k3_r5_rproc_mbox_callback;
        client->tx_block = false;
        client->knows_txdone = false;

        kproc->mbox = mbox_request_channel(client, 0);
        if (IS_ERR(kproc->mbox)) {
                ret = -EBUSY;
                dev_err(dev, "mbox_request_channel failed: %ld\n",
                        PTR_ERR(kproc->mbox));
                return ret;
        }

        /*
         * Ping the remote processor, this is only for sanity's sake for now;
         * there is no functional effect whatsoever.
         *
         * Note that the reply will _not_ arrive immediately: this message
         * will wait in the mailbox fifo until the remote processor is booted.
         */
        ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
        if (ret < 0) {
                dev_err(dev, "mbox_send_message failed: %d\n", ret);
                goto put_mbox;
        }

        boot_addr = rproc->bootaddr;
        /* TODO: add boot_addr sanity checking */
        dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);

        /* boot vector need not be programmed for Core1 in LockStep mode */
        core = kproc->core;
        ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
        if (ret)
                goto put_mbox;

        /* unhalt/run all applicable cores */
        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                list_for_each_entry_reverse(core, &cluster->cores, elem) {
                        ret = k3_r5_core_run(core);
                        if (ret)
                                goto unroll_core_run;
                }
        } else {
                ret = k3_r5_core_run(core);
                if (ret)
                        goto put_mbox;
        }

        return 0;

unroll_core_run:
        list_for_each_entry_continue(core, &cluster->cores, elem) {
                if (k3_r5_core_halt(core))
                        dev_warn(core->dev, "core halt back failed\n");
        }
put_mbox:
        mbox_free_channel(kproc->mbox);
        return ret;
}

/*
 * The R5F stop function includes the following operations
 * 1. Halt R5F core(s)
 *
 * The sequence is different between LockStep and Split modes, and the order
 * in which the operations are performed on the cores is also in general the
 * reverse of that in the start function. The LockStep mode requires each
 * operation to be performed first on Core0 followed by Core1. The Split-mode
 * requires that Core0 be maintained always in a higher power state than Core1
 * (implying Core1 needs to be stopped before Core0).
 *
 * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
 * code, so only Core0 needs to be halted. The function uses the same logic
 * flow as Split-mode for this.
 *
 * Note that the R5F halt operation in general is not effective when the R5F
 * core is running, but is needed to make sure the core won't run after the
 * reset is deasserted the subsequent time. The asserting of reset can
 * be done here, but is preferred to be done in the .unprepare() ops - this
 * maintains the symmetric behavior between the .start(), .stop(), .prepare()
 * and .unprepare() ops, and also balances them well between sysfs 'state'
 * flow and device bind/unbind or module removal.
 */
static int k3_r5_rproc_stop(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        int ret;

        /* halt all applicable cores */
        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                list_for_each_entry(core, &cluster->cores, elem) {
                        ret = k3_r5_core_halt(core);
                        if (ret) {
                                core = list_prev_entry(core, elem);
                                goto unroll_core_halt;
                        }
                }
        } else {
                ret = k3_r5_core_halt(core);
                if (ret)
                        goto out;
        }

        mbox_free_channel(kproc->mbox);

        return 0;

unroll_core_halt:
        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                if (k3_r5_core_run(core))
                        dev_warn(core->dev, "core run back failed\n");
        }
out:
        return ret;
}

/*
 * Internal Memory translation helper
 *
 * Custom function implementing the rproc .da_to_va ops to provide address
 * translation (device address to kernel virtual address) for internal RAMs
 * present in a DSP or IPU device. The translated addresses can be used
 * either by the remoteproc core for loading, or by any rpmsg bus drivers.
 */
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_core *core = kproc->core;
        void __iomem *va = NULL;
        phys_addr_t bus_addr;
        u32 dev_addr, offset;
        size_t size;
        int i;

        if (len == 0)
                return NULL;

        /* handle both R5 and SoC views of ATCM and BTCM */
        for (i = 0; i < core->num_mems; i++) {
                bus_addr = core->mem[i].bus_addr;
                dev_addr = core->mem[i].dev_addr;
                size = core->mem[i].size;

                /* handle R5-view addresses of TCMs */
                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = core->mem[i].cpu_addr + offset;
                        return (__force void *)va;
                }

                /* handle SoC-view addresses of TCMs */
                if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
                        offset = da - bus_addr;
                        va = core->mem[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        /* handle any SRAM regions using SoC-view addresses */
        for (i = 0; i < core->num_sram; i++) {
                dev_addr = core->sram[i].dev_addr;
                size = core->sram[i].size;

                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = core->sram[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        /* handle static DDR reserved memory regions */
        for (i = 0; i < kproc->num_rmems; i++) {
                dev_addr = kproc->rmem[i].dev_addr;
                size = kproc->rmem[i].size;

                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = kproc->rmem[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        return NULL;
}

static const struct rproc_ops k3_r5_rproc_ops = {
        .prepare        = k3_r5_rproc_prepare,
        .unprepare      = k3_r5_rproc_unprepare,
        .start          = k3_r5_rproc_start,
        .stop           = k3_r5_rproc_stop,
        .kick           = k3_r5_rproc_kick,
        .da_to_va       = k3_r5_rproc_da_to_va,
};

/*
 * Internal R5F Core configuration
 *
 * Each R5FSS has a cluster-level setting for configuring the processor
 * subsystem either in a safety/fault-tolerant LockStep mode or a performance
 * oriented Split mode on most SoCs. A few SoCs support a non-safety mode,
 * called Single-CPU mode, as an alternative to LockStep mode that exercises
 * only a single R5F core. Each R5F core has a number of settings to
 * enable/disable each of the TCMs, and to control which TCM appears at the
 * R5F core's address 0x0. These settings need to be configured before the
 * resets for the corresponding core are released. These settings are all
 * protected and managed by the System Processor.
 *
 * This function is used to pre-configure these settings for each R5F core, and
 * the configuration is all done through various ti_sci_proc functions that
 * communicate with the System Processor. The function also ensures that both
 * the cores are halted before the .prepare() step.
 *
 * The function is called from k3_r5_cluster_rproc_init() and is invoked either
 * once (in LockStep or Single-CPU modes) or twice (in Split mode). Support
 * for LockStep-mode is dictated by an eFUSE register bit, and the config
 * settings retrieved from DT are adjusted accordingly as per the permitted
 * cluster mode. Another eFUSE register bit dictates if the R5F cluster only
 * supports a Single-CPU mode. All cluster level settings like Cluster mode and
 * TEINIT (exception handling state dictating ARM or Thumb mode) can only be
 * set and retrieved using Core0.
 *
 * The function behavior is different based on the cluster mode. The R5F cores
 * are configured independently as per their individual settings in Split mode.
 * They are identically configured in LockStep mode using the primary Core0
 * settings. However, some individual settings cannot be set in LockStep mode.
 * This is overcome by switching to Split-mode initially and then programming
 * both the cores with the same settings, before reconfiguring again for
 * LockStep mode.
 */
static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
{
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct device *dev = kproc->dev;
        struct k3_r5_core *core0, *core, *temp;
        u32 ctrl = 0, cfg = 0, stat = 0;
        u32 set_cfg = 0, clr_cfg = 0;
        u64 boot_vec = 0;
        bool lockstep_en;
        bool single_cpu;
        int ret;

        core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
        if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
            cluster->mode == CLUSTER_MODE_SINGLECPU) {
                core = core0;
        } else {
                core = kproc->core;
        }

        ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
                                     &stat);
        if (ret < 0)
                return ret;

        dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
                boot_vec, cfg, ctrl, stat);

        /* check if only Single-CPU mode is supported on applicable SoCs */
        if (cluster->soc_data->single_cpu_mode) {
                single_cpu =
                        !!(stat & PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY);
                if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) {
                        dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n");
                        cluster->mode = CLUSTER_MODE_SINGLECPU;
                }
                goto config;
        }

        /* check conventional LockStep vs Split mode configuration */
        lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
        if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
                cluster->mode = CLUSTER_MODE_SPLIT;
        }

config:
        /* always enable ARM mode and set boot vector to 0 */
        boot_vec = 0x0;
        if (core == core0) {
                clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
                if (cluster->soc_data->single_cpu_mode) {
                        /*
                         * Single-CPU configuration bit can only be configured
                         * on Core0 and system firmware will NACK any requests
                         * with the bit configured, so program it only on
                         * permitted cores
                         */
                        if (cluster->mode == CLUSTER_MODE_SINGLECPU)
                                set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
                } else {
                        /*
                         * LockStep configuration bit is Read-only on Split-mode
                         * _only_ devices and system firmware will NACK any
                         * requests with the bit configured, so program it only
                         * on permitted devices
                         */
                        if (lockstep_en)
                                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
                }
        }

        if (core->atcm_enable)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;

        if (core->btcm_enable)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;

        if (core->loczrama)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;

        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                /*
                 * work around system firmware limitations to make sure both
                 * cores are programmed symmetrically in LockStep. LockStep
                 * and TEINIT config is only allowed with Core0.
                 */
                list_for_each_entry(temp, &cluster->cores, elem) {
                        ret = k3_r5_core_halt(temp);
                        if (ret)
                                goto out;

                        if (temp != core) {
                                clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
                                clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
                        }
                        ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
                                                     set_cfg, clr_cfg);
                        if (ret)
                                goto out;
                }

                set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
                clr_cfg = 0;
                ret = ti_sci_proc_set_config(core->tsp, boot_vec,
                                             set_cfg, clr_cfg);
        } else {
                ret = k3_r5_core_halt(core);
                if (ret)
                        goto out;

                ret = ti_sci_proc_set_config(core->tsp, boot_vec,
                                             set_cfg, clr_cfg);
        }

out:
        return ret;
}

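/*
 * Parse the "memory-region" reserved memory regions for the rproc device.
 * The first region is used as the DMA pool for the vrings and vring buffers,
 * and the remaining regions are mapped in as static carveouts. A minimal,
 * illustrative device-tree fragment (node labels are hypothetical examples)
 * might look like:
 *
 *      &mcu_r5fss0_core0 {
 *              memory-region = <&r5f_dma_memory_region>,
 *                              <&r5f_memory_region>;
 *      };
 */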
static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
        struct device *dev = kproc->dev;
        struct device_node *np = dev_of_node(dev);
        struct device_node *rmem_np;
        struct reserved_mem *rmem;
        int num_rmems;
        int ret, i;

        num_rmems = of_property_count_elems_of_size(np, "memory-region",
                                                    sizeof(phandle));
        if (num_rmems <= 0) {
                dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
                        num_rmems);
                return -EINVAL;
        }
        if (num_rmems < 2) {
                dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
                        num_rmems);
                return -EINVAL;
        }

        /* use reserved memory region 0 for vring DMA allocations */
        ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
        if (ret) {
                dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
                        ret);
                return ret;
        }

        num_rmems--;
        kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
        if (!kproc->rmem) {
                ret = -ENOMEM;
                goto release_rmem;
        }

        /* use remaining reserved memory regions for static carveouts */
        for (i = 0; i < num_rmems; i++) {
                rmem_np = of_parse_phandle(np, "memory-region", i + 1);
                if (!rmem_np) {
                        ret = -EINVAL;
                        goto unmap_rmem;
                }

                rmem = of_reserved_mem_lookup(rmem_np);
                if (!rmem) {
                        of_node_put(rmem_np);
                        ret = -EINVAL;
                        goto unmap_rmem;
                }
                of_node_put(rmem_np);

                kproc->rmem[i].bus_addr = rmem->base;
                /*
                 * R5Fs do not have an MMU, but have a Region Address Translator
                 * (RAT) module that provides a fixed entry translation between
                 * the 32-bit processor addresses and the 64-bit bus addresses.
                 * The RAT is programmable only by the R5F cores, and support
                 * for it is not currently implemented, so 64-bit address
                 * regions are not supported. The absence of MMUs implies that
                 * the R5F device addresses/supported memory regions are
                 * restricted to 32-bit bus addresses, and are identical
                 */
                kproc->rmem[i].dev_addr = (u32)rmem->base;
                kproc->rmem[i].size = rmem->size;
                kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
                if (!kproc->rmem[i].cpu_addr) {
                        dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
                                i + 1, &rmem->base, &rmem->size);
                        ret = -ENOMEM;
                        goto unmap_rmem;
                }

                dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        i + 1, &kproc->rmem[i].bus_addr,
                        kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
                        kproc->rmem[i].dev_addr);
        }
        kproc->num_rmems = num_rmems;

        return 0;

unmap_rmem:
        for (i--; i >= 0; i--)
                iounmap(kproc->rmem[i].cpu_addr);
        kfree(kproc->rmem);
release_rmem:
        of_reserved_mem_device_release(dev);
        return ret;
}

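/*
 * Complement of k3_r5_reserved_mem_init(): unmap the static carveouts and
 * release the DMA pool reserved memory region.
 */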
static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
{
        int i;

        for (i = 0; i < kproc->num_rmems; i++)
                iounmap(kproc->rmem[i].cpu_addr);
        kfree(kproc->rmem);

        of_reserved_mem_device_release(kproc->dev);
}

/*
 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
 * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
 * LockStep-mode. The newer revisions of the R5FSS IP maximize these TCMs by
 * leveraging the Core1 TCMs as well in certain modes where they would have
 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs, Single-CPU mode on
 * AM64x SoCs). This is done by making a Core1 TCM visible immediately after the
 * corresponding Core0 TCM. The SoC memory map uses the larger 64 KB sizes for
 * the Core0 TCMs, and the dts representation reflects this increased size on
 * supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
 * half the original size in Split mode.
 */
static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
{
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        struct device *cdev = core->dev;
        struct k3_r5_core *core0;

        if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
            cluster->mode == CLUSTER_MODE_SINGLECPU ||
            !cluster->soc_data->tcm_is_double)
                return;

        core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
        if (core == core0) {
                WARN_ON(core->mem[0].size != SZ_64K);
                WARN_ON(core->mem[1].size != SZ_64K);

                core->mem[0].size /= 2;
                core->mem[1].size /= 2;

                dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
                        core->mem[0].size, core->mem[1].size);
        }
}

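/*
 * Allocate, configure and register an rproc device for each applicable R5F
 * core in the cluster - both cores in Split mode, or only the first core in
 * LockStep and Single-CPU modes. A failure on Core1 in Split mode also
 * unwinds the already registered Core0 rproc.
 */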
static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        struct k3_r5_rproc *kproc;
        struct k3_r5_core *core, *core1;
        struct device *cdev;
        const char *fw_name;
        struct rproc *rproc;
        int ret;

        core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
        list_for_each_entry(core, &cluster->cores, elem) {
                cdev = core->dev;
                ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
                if (ret) {
                        dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
                                ret);
                        goto out;
                }

                rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
                                    fw_name, sizeof(*kproc));
                if (!rproc) {
                        ret = -ENOMEM;
                        goto out;
                }

                /* K3 R5s have a Region Address Translator (RAT) but no MMU */
                rproc->has_iommu = false;
                /* error recovery is not supported at present */
                rproc->recovery_disabled = true;

                kproc = rproc->priv;
                kproc->cluster = cluster;
                kproc->core = core;
                kproc->dev = cdev;
                kproc->rproc = rproc;
                core->rproc = rproc;

                ret = k3_r5_rproc_configure(kproc);
                if (ret) {
                        dev_err(dev, "initial configure failed, ret = %d\n",
                                ret);
                        goto err_config;
                }

                k3_r5_adjust_tcm_sizes(kproc);

                ret = k3_r5_reserved_mem_init(kproc);
                if (ret) {
                        dev_err(dev, "reserved memory init failed, ret = %d\n",
                                ret);
                        goto err_config;
                }

                ret = rproc_add(rproc);
                if (ret) {
                        dev_err(dev, "rproc_add failed, ret = %d\n", ret);
                        goto err_add;
                }

                /* create only one rproc in lockstep mode or single-cpu mode */
                if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
                    cluster->mode == CLUSTER_MODE_SINGLECPU)
                        break;
        }

        return 0;

err_split:
        rproc_del(rproc);
err_add:
        k3_r5_reserved_mem_exit(kproc);
err_config:
        rproc_free(rproc);
        core->rproc = NULL;
out:
        /* undo core0 upon any failures on core1 in split-mode */
        if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
                core = list_prev_entry(core, elem);
                rproc = core->rproc;
                kproc = rproc->priv;
                goto err_split;
        }
        return ret;
}

static void k3_r5_cluster_rproc_exit(void *data)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(data);
        struct k3_r5_rproc *kproc;
        struct k3_r5_core *core;
        struct rproc *rproc;

        /*
         * lockstep mode and single-cpu modes have only one rproc associated
         * with the first core, whereas split-mode has two rprocs, one
         * associated with each core, and requires that core1 be powered
         * down first
         */
        core = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
                cluster->mode == CLUSTER_MODE_SINGLECPU) ?
                list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
                list_last_entry(&cluster->cores, struct k3_r5_core, elem);

        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                rproc = core->rproc;
                kproc = rproc->priv;

                rproc_del(rproc);

                k3_r5_reserved_mem_exit(kproc);

                rproc_free(rproc);
                core->rproc = NULL;
        }
}

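/*
 * Parse and map the "atcm" and "btcm" memory resources of an R5F core, and
 * compute the device addresses of the TCMs based on the core's loczrama
 * setting (which TCM appears at device address 0x0).
 */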
static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
                                               struct k3_r5_core *core)
{
        static const char * const mem_names[] = {"atcm", "btcm"};
        struct device *dev = &pdev->dev;
        struct resource *res;
        int num_mems;
        int i;

        num_mems = ARRAY_SIZE(mem_names);
        core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
        if (!core->mem)
                return -ENOMEM;

        for (i = 0; i < num_mems; i++) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   mem_names[i]);
                if (!res) {
                        dev_err(dev, "found no memory resource for %s\n",
                                mem_names[i]);
                        return -EINVAL;
                }
                if (!devm_request_mem_region(dev, res->start,
                                             resource_size(res),
                                             dev_name(dev))) {
                        dev_err(dev, "could not request %s region for resource\n",
                                mem_names[i]);
                        return -EBUSY;
                }

                /*
                 * TCMs are designed in general to support RAM-like backing
                 * memories. So, map these as Normal Non-Cached memories. This
                 * also avoids/fixes any potential alignment faults due to
                 * unaligned data accesses when using memcpy() or memset()
                 * functions (normally seen with device type memory).
                 */
                core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
                                                        resource_size(res));
                if (!core->mem[i].cpu_addr) {
                        dev_err(dev, "failed to map %s memory\n", mem_names[i]);
                        return -ENOMEM;
                }
                core->mem[i].bus_addr = res->start;

                /*
                 * TODO:
                 * The R5F cores can place ATCM & BTCM anywhere in their
                 * address space based on the corresponding Region Registers
                 * in the System Control coprocessor. For now, place ATCM and
                 * BTCM at addresses 0 and 0x41010000 (same as the bus address
                 * on AM65x SoCs) based on the loczrama setting
                 */
                if (!strcmp(mem_names[i], "atcm")) {
                        core->mem[i].dev_addr = core->loczrama ?
                                                        0 : K3_R5_TCM_DEV_ADDR;
                } else {
                        core->mem[i].dev_addr = core->loczrama ?
                                                        K3_R5_TCM_DEV_ADDR : 0;
                }
                core->mem[i].size = resource_size(res);

                dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        mem_names[i], &core->mem[i].bus_addr,
                        core->mem[i].size, core->mem[i].cpu_addr,
                        core->mem[i].dev_addr);
        }
        core->num_mems = num_mems;

        return 0;
}

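/*
 * Parse the optional "sram" phandles of an R5F core and map the referenced
 * on-chip SRAM regions. The regions use their SoC-view bus addresses as the
 * device addresses, since the R5Fs have no MMU.
 */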
static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
                                           struct k3_r5_core *core)
{
        struct device_node *np = pdev->dev.of_node;
        struct device *dev = &pdev->dev;
        struct device_node *sram_np;
        struct resource res;
        int num_sram;
        int i, ret;

        num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
        if (num_sram <= 0) {
                dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
                        num_sram);
                return 0;
        }

        core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
        if (!core->sram)
                return -ENOMEM;

        for (i = 0; i < num_sram; i++) {
                sram_np = of_parse_phandle(np, "sram", i);
                if (!sram_np)
                        return -EINVAL;

                if (!of_device_is_available(sram_np)) {
                        of_node_put(sram_np);
                        return -EINVAL;
                }

                ret = of_address_to_resource(sram_np, 0, &res);
                of_node_put(sram_np);
                if (ret)
                        return -EINVAL;

                core->sram[i].bus_addr = res.start;
                core->sram[i].dev_addr = res.start;
                core->sram[i].size = resource_size(&res);
                core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
                                                         resource_size(&res));
                if (!core->sram[i].cpu_addr) {
                        dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
                                i, &res.start);
                        return -ENOMEM;
                }

                dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        i, &core->sram[i].bus_addr,
                        core->sram[i].size, core->sram[i].cpu_addr,
                        core->sram[i].dev_addr);
        }
        core->num_sram = num_sram;

        return 0;
}

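/*
 * Construct the TI-SCI processor control handle for a core from the
 * "ti,sci-proc-ids" DT property, which supplies the processor id and the
 * host id as a pair of values.
 */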
static
struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
                                          const struct ti_sci_handle *sci)
{
        struct ti_sci_proc *tsp;
        u32 temp[2];
        int ret;

        ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
                                         temp, 2);
        if (ret < 0)
                return ERR_PTR(ret);

        tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
        if (!tsp)
                return ERR_PTR(-ENOMEM);

        tsp->dev = dev;
        tsp->sci = sci;
        tsp->ops = &sci->ops.proc_ops;
        tsp->proc_id = temp[0];
        tsp->host_id = temp[1];

        return tsp;
}

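/*
 * Parse all the per-core DT properties and acquire the resources needed to
 * manage an R5F core: the TCM configuration flags, the TI-SCI handle and
 * device id, the local reset control, and the TI-SCI processor handle. An
 * illustrative, hypothetical core node using the parsed properties (all
 * labels and values below are examples only, not taken from a real dts):
 *
 *      r5f@41000000 {
 *              ...
 *              ti,sci = <&dmsc>;
 *              ti,sci-dev-id = <159>;
 *              ti,sci-proc-ids = <0x01 0xff>;
 *              resets = <&k3_reset 159 1>;
 *              firmware-name = "am65x-mcu-r5f0_0-fw";
 *              ti,atcm-enable = <1>;
 *              ti,btcm-enable = <1>;
 *              ti,loczrama = <1>;
 *      };
 */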
1276static int k3_r5_core_of_init(struct platform_device *pdev)
1277{
1278        struct device *dev = &pdev->dev;
1279        struct device_node *np = dev_of_node(dev);
1280        struct k3_r5_core *core;
1281        int ret;
1282
1283        if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
1284                return -ENOMEM;
1285
1286        core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
1287        if (!core) {
1288                ret = -ENOMEM;
1289                goto err;
1290        }
1291
1292        core->dev = dev;
1293        /*
1294         * Use SoC Power-on-Reset values as default if no DT properties are
1295         * used to dictate the TCM configurations
1296         */
1297        core->atcm_enable = 0;
1298        core->btcm_enable = 1;
1299        core->loczrama = 1;
1300
1301        ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
1302        if (ret < 0 && ret != -EINVAL) {
1303                dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
1304                        ret);
1305                goto err;
1306        }
1307
1308        ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
1309        if (ret < 0 && ret != -EINVAL) {
1310                dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
1311                        ret);
1312                goto err;
1313        }
1314
1315        ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
1316        if (ret < 0 && ret != -EINVAL) {
1317                dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
1318                goto err;
1319        }
1320
        core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
        if (IS_ERR(core->ti_sci)) {
                ret = PTR_ERR(core->ti_sci);
                if (ret != -EPROBE_DEFER) {
                        dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
                                ret);
                }
                core->ti_sci = NULL;
                goto err;
        }

        ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
        if (ret) {
                dev_err(dev, "missing 'ti,sci-dev-id' property\n");
                goto err;
        }

        core->reset = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR_OR_NULL(core->reset)) {
                ret = PTR_ERR_OR_ZERO(core->reset);
                if (!ret)
                        ret = -ENODEV;
                if (ret != -EPROBE_DEFER) {
                        dev_err(dev, "failed to get reset handle, ret = %d\n",
                                ret);
                }
                goto err;
        }

        core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
        if (IS_ERR(core->tsp)) {
                ret = PTR_ERR(core->tsp);
                dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
                        ret);
                goto err;
        }

        ret = k3_r5_core_of_get_internal_memories(pdev, core);
        if (ret) {
                dev_err(dev, "failed to get internal memories, ret = %d\n",
                        ret);
                goto err;
        }

        ret = k3_r5_core_of_get_sram_memories(pdev, core);
        if (ret) {
                dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
                goto err;
        }

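        /*
         * Request ownership of the processor from system firmware (TI-SCI)
         * up front; the later configuration and boot-control operations on
         * this core all go through the requested tsp handle.
         */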
        ret = ti_sci_proc_request(core->tsp);
        if (ret < 0) {
                dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
                goto err;
        }

        platform_set_drvdata(pdev, core);
        devres_close_group(dev, k3_r5_core_of_init);

        return 0;

err:
        devres_release_group(dev, k3_r5_core_of_init);
        return ret;
}

/*
 * Free the resources explicitly since the driver model is not used
 * for the child R5F devices
 */
static void k3_r5_core_of_exit(struct platform_device *pdev)
{
        struct k3_r5_core *core = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        int ret;

        ret = ti_sci_proc_release(core->tsp);
        if (ret)
                dev_err(dev, "failed to release proc, ret = %d\n", ret);

        platform_set_drvdata(pdev, NULL);
        devres_release_group(dev, k3_r5_core_of_init);
}

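/*
 * Tear down the core devices in reverse order of their initialization.
 * This runs both from the k3_r5_cluster_of_init() error path and, as a
 * devm action registered in probe, on driver unbind.
 */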
static void k3_r5_cluster_of_exit(void *data)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(data);
        struct platform_device *cpdev;
        struct k3_r5_core *core, *temp;

        list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
                list_del(&core->elem);
                cpdev = to_platform_device(core->dev);
                k3_r5_core_of_exit(cpdev);
        }
}

static int k3_r5_cluster_of_init(struct platform_device *pdev)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        struct device_node *np = dev_of_node(dev);
        struct platform_device *cpdev;
        struct device_node *child;
        struct k3_r5_core *core;
        int ret;

        for_each_available_child_of_node(np, child) {
                cpdev = of_find_device_by_node(child);
                if (!cpdev) {
                        ret = -ENODEV;
                        dev_err(dev, "could not get R5 core platform device\n");
                        /* drop the reference taken by the child iterator */
                        of_node_put(child);
                        goto fail;
                }

                ret = k3_r5_core_of_init(cpdev);
                if (ret) {
                        dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
                                ret);
                        put_device(&cpdev->dev);
                        of_node_put(child);
                        goto fail;
                }

                core = platform_get_drvdata(cpdev);
                put_device(&cpdev->dev);
                list_add_tail(&core->elem, &cluster->cores);
        }

        return 0;

fail:
        k3_r5_cluster_of_exit(pdev);
        return ret;
}

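/*
 * Cluster probe: validate the SoC match data, apply the cluster-mode
 * default (optionally overridden through "ti,cluster-mode" in DT),
 * populate and initialize the R5F core child devices, and finally create
 * the remoteproc instances. Each teardown step is registered as a devm
 * action right after its corresponding init step succeeds.
 */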
static int k3_r5_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev_of_node(dev);
        struct k3_r5_cluster *cluster;
        const struct k3_r5_soc_data *data;
        int ret;
        int num_cores;

        data = of_device_get_match_data(&pdev->dev);
        if (!data) {
                dev_err(dev, "SoC-specific data is not defined\n");
                return -ENODEV;
        }

        cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        cluster->dev = dev;
        /*
         * Default to the most common efuse configurations: Split-mode on
         * AM64x and LockStep-mode on all other SoCs
         */
        cluster->mode = data->single_cpu_mode ?
                                CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
        cluster->soc_data = data;
        INIT_LIST_HEAD(&cluster->cores);

        ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
        if (ret < 0 && ret != -EINVAL) {
                dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
                        ret);
                return ret;
        }

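        /*
         * The cluster-wide mode configuration involves both cores, so both
         * R5F child nodes must be enabled in DT even when the cores are to
         * run independently in Split mode.
         */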
        num_cores = of_get_available_child_count(np);
        if (num_cores != 2) {
                dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
                        num_cores);
                return -ENODEV;
        }

        platform_set_drvdata(pdev, cluster);

        ret = devm_of_platform_populate(dev);
        if (ret) {
                dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = k3_r5_cluster_of_init(pdev);
        if (ret) {
                dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
                return ret;
        }

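        /*
         * devm_add_action_or_reset() runs the teardown action immediately
         * when it cannot be registered, so the init steps need no explicit
         * unwinding in the error paths below.
         */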
        ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
        if (ret)
                return ret;

        ret = k3_r5_cluster_rproc_init(pdev);
        if (ret) {
                dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
        if (ret)
                return ret;

        return 0;
}

static const struct k3_r5_soc_data am65_j721e_soc_data = {
        .tcm_is_double = false,
        .tcm_ecc_autoinit = false,
        .single_cpu_mode = false,
};

static const struct k3_r5_soc_data j7200_soc_data = {
        .tcm_is_double = true,
        .tcm_ecc_autoinit = true,
        .single_cpu_mode = false,
};

static const struct k3_r5_soc_data am64_soc_data = {
        .tcm_is_double = true,
        .tcm_ecc_autoinit = true,
        .single_cpu_mode = true,
};

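/*
 * An illustrative (not board-verified) DT fragment that would match this
 * driver; the node name, unit address and cluster mode are placeholder
 * values, see the ti,k3-r5f-rproc bindings for the full property set:
 *
 *   r5fss@41000000 {
 *           compatible = "ti,am654-r5fss";
 *           ti,cluster-mode = <1>;
 *           ...
 *   };
 */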
static const struct of_device_id k3_r5_of_match[] = {
        { .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
        { .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
        { .compatible = "ti,j7200-r5fss", .data = &j7200_soc_data, },
        { .compatible = "ti,am64-r5fss",  .data = &am64_soc_data, },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);

static struct platform_driver k3_r5_rproc_driver = {
        .probe = k3_r5_probe,
        .driver = {
                .name = "k3_r5_rproc",
                .of_match_table = k3_r5_of_match,
        },
};

module_platform_driver(k3_r5_rproc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");