linux/drivers/remoteproc/ti_k3_r5_remoteproc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 R5F (MCU) Remote Processor driver
 *
 * Copyright (C) 2017-2020 Texas Instruments Incorporated - https://www.ti.com/
 *      Suman Anna <s-anna@ti.com>
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"

/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR      0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN                    0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN                 0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP                  0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT                    0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN                   0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE               0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN                   0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN                   0x00002000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT                0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE                    0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI                    0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED              0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED     0x00000100

/**
 * struct k3_r5_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5_mem {
        void __iomem *cpu_addr;
        phys_addr_t bus_addr;
        u32 dev_addr;
        size_t size;
};

enum cluster_mode {
        CLUSTER_MODE_SPLIT = 0,
        CLUSTER_MODE_LOCKSTEP,
};

/**
 * struct k3_r5_cluster - K3 R5F Cluster structure
 * @dev: cached device pointer
 * @mode: Mode to configure the Cluster - Split or LockStep
 * @cores: list of R5 cores within the cluster
 */
struct k3_r5_cluster {
        struct device *dev;
        enum cluster_mode mode;
        struct list_head cores;
};

/**
 * struct k3_r5_core - K3 R5 core structure
 * @elem: linked list item
 * @dev: cached device pointer
 * @rproc: rproc handle representing this core
 * @mem: internal memory regions data
 * @sram: on-chip SRAM memory regions data
 * @num_mems: number of internal memory regions
 * @num_sram: number of on-chip SRAM memory regions
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 */
struct k3_r5_core {
        struct list_head elem;
        struct device *dev;
        struct rproc *rproc;
        struct k3_r5_mem *mem;
        struct k3_r5_mem *sram;
        int num_mems;
        int num_sram;
        struct reset_control *reset;
        struct ti_sci_proc *tsp;
        const struct ti_sci_handle *ti_sci;
        u32 ti_sci_id;
        u32 atcm_enable;
        u32 btcm_enable;
        u32 loczrama;
};

/**
 * struct k3_r5_rproc - K3 remote processor state
 * @dev: cached device pointer
 * @cluster: cached pointer to parent cluster structure
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 * @rproc: rproc handle
 * @core: cached pointer to r5 core structure being used
 * @rmem: reserved memory regions data
 * @num_rmems: number of reserved memory regions
 */
struct k3_r5_rproc {
        struct device *dev;
        struct k3_r5_cluster *cluster;
        struct mbox_chan *mbox;
        struct mbox_client client;
        struct rproc *rproc;
        struct k3_r5_core *core;
        struct k3_r5_mem *rmem;
        int num_rmems;
};

/**
 * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the OMAP mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
{
        struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
                                                client);
        struct device *dev = kproc->rproc->dev.parent;
        const char *name = kproc->rproc->name;
        u32 msg = omap_mbox_message(data);

        dev_dbg(dev, "mbox msg: 0x%x\n", msg);

        switch (msg) {
        case RP_MBOX_CRASH:
                /*
                 * remoteproc detected an exception, but error recovery is not
                 * supported. So, just log this for now
                 */
                dev_err(dev, "K3 R5F rproc %s crashed\n", name);
                break;
        case RP_MBOX_ECHO_REPLY:
                dev_info(dev, "received echo reply from %s\n", name);
                break;
        default:
                /* silently handle all other valid messages */
                if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
                        return;
                if (msg > kproc->rproc->max_notifyid) {
                        dev_dbg(dev, "dropping unknown message 0x%x", msg);
                        return;
                }
                /* msg contains the index of the triggered vring */
                if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
                        dev_dbg(dev, "no message was found in vqid %d\n", msg);
        }
}

/* kick a virtqueue */
static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct device *dev = rproc->dev.parent;
        mbox_msg_t msg = (mbox_msg_t)vqid;
        int ret;

        /* send the index of the triggered virtqueue in the mailbox payload */
        ret = mbox_send_message(kproc->mbox, (void *)msg);
        if (ret < 0)
                dev_err(dev, "failed to send mailbox message, status = %d\n",
                        ret);
}

static int k3_r5_split_reset(struct k3_r5_core *core)
{
        int ret;

        ret = reset_control_assert(core->reset);
        if (ret) {
                dev_err(core->dev, "local-reset assert failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                   core->ti_sci_id);
        if (ret) {
                dev_err(core->dev, "module-reset assert failed, ret = %d\n",
                        ret);
                if (reset_control_deassert(core->reset))
                        dev_warn(core->dev, "local-reset deassert back failed\n");
        }

        return ret;
}

static int k3_r5_split_release(struct k3_r5_core *core)
{
        int ret;

        ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
                                                   core->ti_sci_id);
        if (ret) {
                dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = reset_control_deassert(core->reset);
        if (ret) {
                dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
                        ret);
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }

        return ret;
}

static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
        struct k3_r5_core *core;
        int ret;

        /* assert local reset on all applicable cores */
        list_for_each_entry(core, &cluster->cores, elem) {
                ret = reset_control_assert(core->reset);
                if (ret) {
                        dev_err(core->dev, "local-reset assert failed, ret = %d\n",
                                ret);
                        core = list_prev_entry(core, elem);
                        goto unroll_local_reset;
                }
        }

        /* disable PSC modules on all applicable cores */
        list_for_each_entry(core, &cluster->cores, elem) {
                ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                           core->ti_sci_id);
                if (ret) {
                        dev_err(core->dev, "module-reset assert failed, ret = %d\n",
                                ret);
                        goto unroll_module_reset;
                }
        }

        return 0;

unroll_module_reset:
        list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }
        core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                if (reset_control_deassert(core->reset))
                        dev_warn(core->dev, "local-reset deassert back failed\n");
        }

        return ret;
}

static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
        struct k3_r5_core *core;
        int ret;

        /* enable PSC modules on all applicable cores */
        list_for_each_entry_reverse(core, &cluster->cores, elem) {
                ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
                                                           core->ti_sci_id);
                if (ret) {
                        dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
                                ret);
                        core = list_next_entry(core, elem);
                        goto unroll_module_reset;
                }
        }

        /* deassert local reset on all applicable cores */
        list_for_each_entry_reverse(core, &cluster->cores, elem) {
                ret = reset_control_deassert(core->reset);
                if (ret) {
                        dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
                                ret);
                        goto unroll_local_reset;
                }
        }

        return 0;

unroll_local_reset:
        list_for_each_entry_continue(core, &cluster->cores, elem) {
                if (reset_control_assert(core->reset))
                        dev_warn(core->dev, "local-reset assert back failed\n");
        }
        core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
        list_for_each_entry_from(core, &cluster->cores, elem) {
                if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
                                                         core->ti_sci_id))
                        dev_warn(core->dev, "module-reset assert back failed\n");
        }

        return ret;
}

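/*
 * Helpers to halt or run an R5F core via the TI-SCI processor control
 * interface: k3_r5_core_halt() sets the CORE_HALT control flag to halt
 * the core, while k3_r5_core_run() clears it to let the core execute.
 */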
static inline int k3_r5_core_halt(struct k3_r5_core *core)
{
        return ti_sci_proc_set_control(core->tsp,
                                       PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}

static inline int k3_r5_core_run(struct k3_r5_core *core)
{
        return ti_sci_proc_set_control(core->tsp,
                                       0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}

/*
 * The R5F cores have controls for both a reset and a halt/run. The code
 * execution from DDR requires the initial boot-strapping code to be run
 * from the internal TCMs. This function is used to release the resets on
 * applicable cores to allow loading into the TCMs. The .prepare() ops is
 * invoked by remoteproc core before any firmware loading, and is followed
 * by the .start() ops after loading to actually let the R5 cores run.
 */
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        struct device *dev = kproc->dev;
        int ret;

        ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
                k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
        if (ret) {
                dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
                        ret);
                return ret;
        }

        /*
         * Zero out both TCMs unconditionally (access from v8 Arm core is not
         * affected by ATCM & BTCM enable configuration values) so that ECC
         * can be effective on all TCM addresses.
         */
        dev_dbg(dev, "zeroing out ATCM memory\n");
        memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);

        dev_dbg(dev, "zeroing out BTCM memory\n");
        memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);

        return 0;
}

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to that of the .prepare() ops. The function is used to assert the
 * resets on all applicable cores for the rproc device (depending on LockStep
 * or Split mode). This completes the second portion of powering down the R5F
 * cores. The cores themselves are only halted in the .stop() ops, and the
 * .unprepare() ops is invoked by the remoteproc core after the remoteproc is
 * stopped.
 */
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        struct device *dev = kproc->dev;
        int ret;

        ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
                k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
        if (ret)
                dev_err(dev, "unable to disable cores, ret = %d\n", ret);

        return ret;
}

/*
 * The R5F start sequence includes two different operations
 * 1. Configure the boot vector for R5F core(s)
 * 2. Unhalt/Run the R5F core(s)
 *
 * The sequence is different between LockStep and Split modes. LockStep mode
 * requires the boot vector to be configured only for Core0, after which both
 * cores are unhalted to start execution - Core1 needs to be unhalted first,
 * followed by Core0. Split mode requires that Core0 always be maintained in a
 * higher power state than Core1 (implying Core1 must only be started after
 * Core0 has been started).
 */
static int k3_r5_rproc_start(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct mbox_client *client = &kproc->client;
        struct device *dev = kproc->dev;
        struct k3_r5_core *core;
        u32 boot_addr;
        int ret;

        client->dev = dev;
        client->tx_done = NULL;
        client->rx_callback = k3_r5_rproc_mbox_callback;
        client->tx_block = false;
        client->knows_txdone = false;

        kproc->mbox = mbox_request_channel(client, 0);
        if (IS_ERR(kproc->mbox)) {
                ret = -EBUSY;
                dev_err(dev, "mbox_request_channel failed: %ld\n",
                        PTR_ERR(kproc->mbox));
                return ret;
        }

        /*
         * Ping the remote processor, this is only for sanity's sake for now;
         * there is no functional effect whatsoever.
         *
         * Note that the reply will _not_ arrive immediately: this message
         * will wait in the mailbox fifo until the remote processor is booted.
         */
        ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
        if (ret < 0) {
                dev_err(dev, "mbox_send_message failed: %d\n", ret);
                goto put_mbox;
        }

        boot_addr = rproc->bootaddr;
        /* TODO: add boot_addr sanity checking */
        dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);

        /* boot vector need not be programmed for Core1 in LockStep mode */
        core = kproc->core;
        ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
        if (ret)
                goto put_mbox;

        /* unhalt/run all applicable cores */
        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                list_for_each_entry_reverse(core, &cluster->cores, elem) {
                        ret = k3_r5_core_run(core);
                        if (ret)
                                goto unroll_core_run;
                }
        } else {
                ret = k3_r5_core_run(core);
                if (ret)
                        goto put_mbox;
        }

        return 0;

unroll_core_run:
        list_for_each_entry_continue(core, &cluster->cores, elem) {
                if (k3_r5_core_halt(core))
                        dev_warn(core->dev, "core halt back failed\n");
        }
put_mbox:
        mbox_free_channel(kproc->mbox);
        return ret;
}

/*
 * The R5F stop function includes the following operations
 * 1. Halt R5F core(s)
 *
 * The sequence is different between LockStep and Split modes, and the order
 * in which the operations are performed on the cores is, in general, the
 * reverse of the start function. LockStep mode requires each operation to be
 * performed first on Core0 followed by Core1. Split mode requires that Core0
 * always be maintained in a higher power state than Core1 (implying Core1
 * needs to be stopped before Core0).
 *
 * Note that the R5F halt operation in general is not effective when the R5F
 * core is running, but is needed to make sure the core won't run after
 * deasserting the reset the subsequent time. The asserting of reset can
 * be done here, but is preferred to be done in the .unprepare() ops - this
 * maintains the symmetric behavior between the .start(), .stop(), .prepare()
 * and .unprepare() ops, and also balances them well between sysfs 'state'
 * flow and device bind/unbind or module removal.
 */
static int k3_r5_rproc_stop(struct rproc *rproc)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct k3_r5_core *core = kproc->core;
        int ret;

        /* halt all applicable cores */
        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                list_for_each_entry(core, &cluster->cores, elem) {
                        ret = k3_r5_core_halt(core);
                        if (ret) {
                                core = list_prev_entry(core, elem);
                                goto unroll_core_halt;
                        }
                }
        } else {
                ret = k3_r5_core_halt(core);
                if (ret)
                        goto out;
        }

        mbox_free_channel(kproc->mbox);

        return 0;

unroll_core_halt:
        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                if (k3_r5_core_run(core))
                        dev_warn(core->dev, "core run back failed\n");
        }
out:
        return ret;
}

/*
 * Internal Memory translation helper
 *
 * Custom function implementing the rproc .da_to_va ops to provide address
 * translation (device address to kernel virtual address) for the internal
 * RAM regions (TCMs and SRAMs) and DDR carveouts used by the R5F device.
 * The translated addresses can be used either by the remoteproc core for
 * loading, or by any rpmsg bus drivers.
 */
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
        struct k3_r5_rproc *kproc = rproc->priv;
        struct k3_r5_core *core = kproc->core;
        void __iomem *va = NULL;
        phys_addr_t bus_addr;
        u32 dev_addr, offset;
        size_t size;
        int i;

        if (len == 0)
                return NULL;

        /* handle both R5 and SoC views of ATCM and BTCM */
        for (i = 0; i < core->num_mems; i++) {
                bus_addr = core->mem[i].bus_addr;
                dev_addr = core->mem[i].dev_addr;
                size = core->mem[i].size;

                /* handle R5-view addresses of TCMs */
                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = core->mem[i].cpu_addr + offset;
                        return (__force void *)va;
                }

                /* handle SoC-view addresses of TCMs */
                if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
                        offset = da - bus_addr;
                        va = core->mem[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        /* handle any SRAM regions using SoC-view addresses */
        for (i = 0; i < core->num_sram; i++) {
                dev_addr = core->sram[i].dev_addr;
                size = core->sram[i].size;

                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = core->sram[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        /* handle static DDR reserved memory regions */
        for (i = 0; i < kproc->num_rmems; i++) {
                dev_addr = kproc->rmem[i].dev_addr;
                size = kproc->rmem[i].size;

                if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
                        offset = da - dev_addr;
                        va = kproc->rmem[i].cpu_addr + offset;
                        return (__force void *)va;
                }
        }

        return NULL;
}

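/*
 * A worked example of the translation above, with purely hypothetical
 * values for illustration: if ATCM is the first entry with dev_addr 0x0,
 * bus_addr 0x41000000 and size 0x8000, a lookup for da = 0x100 matches
 * the R5-view range and resolves to core->mem[0].cpu_addr + 0x100; the
 * same location is equally reachable via its SoC-view address,
 * da = 0x41000100.
 */
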
static const struct rproc_ops k3_r5_rproc_ops = {
        .prepare        = k3_r5_rproc_prepare,
        .unprepare      = k3_r5_rproc_unprepare,
        .start          = k3_r5_rproc_start,
        .stop           = k3_r5_rproc_stop,
        .kick           = k3_r5_rproc_kick,
        .da_to_va       = k3_r5_rproc_da_to_va,
};

/*
 * Internal R5F Core configuration
 *
 * Each R5FSS has a cluster-level setting for configuring the processor
 * subsystem either in a safety/fault-tolerant LockStep mode or a performance
 * oriented Split mode. Each R5F core has a number of settings to enable/disable
 * each of the TCMs and to control which TCM appears at the R5F core's
 * address 0x0. These settings need to be configured before the resets for the
 * corresponding core are released. These settings are all protected and managed
 * by the System Processor.
 *
 * This function is used to pre-configure these settings for each R5F core, and
 * the configuration is all done through various ti_sci_proc functions that
 * communicate with the System Processor. The function also ensures that both
 * the cores are halted before the .prepare() step.
 *
 * The function is called from k3_r5_cluster_rproc_init() and is invoked either
 * once (in LockStep mode) or twice (in Split mode). Support for LockStep-mode
 * is dictated by an eFUSE register bit, and the config settings retrieved from
 * DT are adjusted accordingly as per the permitted cluster mode. All cluster
 * level settings like Cluster mode and TEINIT (exception handling state
 * dictating ARM or Thumb mode) can only be set and retrieved using Core0.
 *
 * The function behavior is different based on the cluster mode. The R5F cores
 * are configured independently as per their individual settings in Split mode.
 * They are identically configured in LockStep mode using the primary Core0
 * settings. However, some individual settings cannot be set in LockStep mode.
 * This is overcome by switching to Split-mode initially and then programming
 * both the cores with the same settings, before reconfiguring again for
 * LockStep mode.
 */
static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
{
        struct k3_r5_cluster *cluster = kproc->cluster;
        struct device *dev = kproc->dev;
        struct k3_r5_core *core0, *core, *temp;
        u32 ctrl = 0, cfg = 0, stat = 0;
        u32 set_cfg = 0, clr_cfg = 0;
        u64 boot_vec = 0;
        bool lockstep_en;
        int ret;

        core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
        core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ? core0 : kproc->core;

        ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
                                     &stat);
        if (ret < 0)
                return ret;

        dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
                boot_vec, cfg, ctrl, stat);

        lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
        if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
                cluster->mode = CLUSTER_MODE_SPLIT;
        }

        /* always enable ARM mode and set boot vector to 0 */
        boot_vec = 0x0;
        if (core == core0) {
                clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
                /*
                 * LockStep configuration bit is Read-only on Split-mode _only_
                 * devices and system firmware will NACK any requests with the
                 * bit configured, so program it only on permitted devices
                 */
                if (lockstep_en)
                        clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
        }

        if (core->atcm_enable)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;

        if (core->btcm_enable)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;

        if (core->loczrama)
                set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
        else
                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;

        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
                /*
                 * work around system firmware limitations to make sure both
                 * cores are programmed symmetrically in LockStep. LockStep
                 * and TEINIT config is only allowed with Core0.
                 */
                list_for_each_entry(temp, &cluster->cores, elem) {
                        ret = k3_r5_core_halt(temp);
                        if (ret)
                                goto out;

                        if (temp != core) {
                                clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
                                clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
                        }
                        ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
                                                     set_cfg, clr_cfg);
                        if (ret)
                                goto out;
                }

                set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
                clr_cfg = 0;
                ret = ti_sci_proc_set_config(core->tsp, boot_vec,
                                             set_cfg, clr_cfg);
        } else {
                ret = k3_r5_core_halt(core);
                if (ret)
                        goto out;

                ret = ti_sci_proc_set_config(core->tsp, boot_vec,
                                             set_cfg, clr_cfg);
        }

out:
        return ret;
}

static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
        struct device *dev = kproc->dev;
        struct device_node *np = dev_of_node(dev);
        struct device_node *rmem_np;
        struct reserved_mem *rmem;
        int num_rmems;
        int ret, i;

        num_rmems = of_property_count_elems_of_size(np, "memory-region",
                                                    sizeof(phandle));
        if (num_rmems <= 0) {
                dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
                        num_rmems);
                return -EINVAL;
        }
        if (num_rmems < 2) {
                dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
                        num_rmems);
                return -EINVAL;
        }

        /* use reserved memory region 0 for vring DMA allocations */
        ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
        if (ret) {
                dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
                        ret);
                return ret;
        }

        num_rmems--;
        kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
        if (!kproc->rmem) {
                ret = -ENOMEM;
                goto release_rmem;
        }

        /* use remaining reserved memory regions for static carveouts */
        for (i = 0; i < num_rmems; i++) {
                rmem_np = of_parse_phandle(np, "memory-region", i + 1);
                if (!rmem_np) {
                        ret = -EINVAL;
                        goto unmap_rmem;
                }

                rmem = of_reserved_mem_lookup(rmem_np);
                if (!rmem) {
                        of_node_put(rmem_np);
                        ret = -EINVAL;
                        goto unmap_rmem;
                }
                of_node_put(rmem_np);

                kproc->rmem[i].bus_addr = rmem->base;
                /*
                 * R5Fs do not have an MMU, but have a Region Address Translator
                 * (RAT) module that provides a fixed entry translation between
                 * the 32-bit processor addresses to 64-bit bus addresses. The
                 * RAT is programmable only by the R5F cores, and is currently
                 * not supported by this driver, so 64-bit address regions are
                 * not usable. The absence of MMUs implies that the R5F device
                 * addresses/supported memory regions are restricted to 32-bit
                 * bus addresses, and are identical.
                 */
                kproc->rmem[i].dev_addr = (u32)rmem->base;
                kproc->rmem[i].size = rmem->size;
                kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
                if (!kproc->rmem[i].cpu_addr) {
                        dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
                                i + 1, &rmem->base, &rmem->size);
                        ret = -ENOMEM;
                        goto unmap_rmem;
                }

                dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        i + 1, &kproc->rmem[i].bus_addr,
                        kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
                        kproc->rmem[i].dev_addr);
        }
        kproc->num_rmems = num_rmems;

        return 0;

unmap_rmem:
        for (i--; i >= 0; i--)
                iounmap(kproc->rmem[i].cpu_addr);
        kfree(kproc->rmem);
release_rmem:
        of_reserved_mem_device_release(dev);
        return ret;
}

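/*
 * Illustrative (hypothetical) device-tree usage matching the parsing above:
 * the first "memory-region" entry backs the vring/virtio DMA pool, and each
 * remaining entry becomes a static carveout. Node labels are placeholders:
 *
 *      &mcu_r5f0 {
 *              memory-region = <&r5f0_dma_memory_region>,
 *                              <&r5f0_memory_region>;
 *      };
 */
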
static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
{
        int i;

        for (i = 0; i < kproc->num_rmems; i++)
                iounmap(kproc->rmem[i].cpu_addr);
        kfree(kproc->rmem);

        of_reserved_mem_device_release(kproc->dev);
}

static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        struct k3_r5_rproc *kproc;
        struct k3_r5_core *core, *core1;
        struct device *cdev;
        const char *fw_name;
        struct rproc *rproc;
        int ret;

        core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
        list_for_each_entry(core, &cluster->cores, elem) {
                cdev = core->dev;
                ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
                if (ret) {
                        dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
                                ret);
                        goto out;
                }

                rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
                                    fw_name, sizeof(*kproc));
                if (!rproc) {
                        ret = -ENOMEM;
                        goto out;
                }

                /* K3 R5s have a Region Address Translator (RAT) but no MMU */
                rproc->has_iommu = false;
                /* error recovery is not supported at present */
                rproc->recovery_disabled = true;

                kproc = rproc->priv;
                kproc->cluster = cluster;
                kproc->core = core;
                kproc->dev = cdev;
                kproc->rproc = rproc;
                core->rproc = rproc;

                ret = k3_r5_rproc_configure(kproc);
                if (ret) {
                        dev_err(dev, "initial configure failed, ret = %d\n",
                                ret);
                        goto err_config;
                }

                ret = k3_r5_reserved_mem_init(kproc);
                if (ret) {
                        dev_err(dev, "reserved memory init failed, ret = %d\n",
                                ret);
                        goto err_config;
                }

                ret = rproc_add(rproc);
                if (ret) {
                        dev_err(dev, "rproc_add failed, ret = %d\n", ret);
                        goto err_add;
                }

                /* create only one rproc in lockstep mode */
                if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
                        break;
        }

        return 0;

err_split:
        rproc_del(rproc);
err_add:
        k3_r5_reserved_mem_exit(kproc);
err_config:
        rproc_free(rproc);
        core->rproc = NULL;
out:
        /* undo core0 upon any failures on core1 in split-mode */
        if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
                core = list_prev_entry(core, elem);
                rproc = core->rproc;
                kproc = rproc->priv;
                goto err_split;
        }
        return ret;
}

static int k3_r5_cluster_rproc_exit(struct platform_device *pdev)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
        struct k3_r5_rproc *kproc;
        struct k3_r5_core *core;
        struct rproc *rproc;

        /*
         * lockstep mode has only one rproc associated with the first core,
         * whereas split-mode has one rproc per core, and requires that core1
         * be powered down first
         */
        core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
                list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
                list_last_entry(&cluster->cores, struct k3_r5_core, elem);

        list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
                rproc = core->rproc;
                kproc = rproc->priv;

                rproc_del(rproc);

                k3_r5_reserved_mem_exit(kproc);

                rproc_free(rproc);
                core->rproc = NULL;
        }

        return 0;
}

static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
                                               struct k3_r5_core *core)
{
        static const char * const mem_names[] = {"atcm", "btcm"};
        struct device *dev = &pdev->dev;
        struct resource *res;
        int num_mems;
        int i;

        num_mems = ARRAY_SIZE(mem_names);
        core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
        if (!core->mem)
                return -ENOMEM;

        for (i = 0; i < num_mems; i++) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   mem_names[i]);
                if (!res) {
                        dev_err(dev, "found no memory resource for %s\n",
                                mem_names[i]);
                        return -EINVAL;
                }
                if (!devm_request_mem_region(dev, res->start,
                                             resource_size(res),
                                             dev_name(dev))) {
                        dev_err(dev, "could not request %s region for resource\n",
                                mem_names[i]);
                        return -EBUSY;
                }

                /*
                 * TCMs are designed in general to support RAM-like backing
                 * memories. So, map these as Normal Non-Cached memories. This
                 * also avoids/fixes any potential alignment faults due to
                 * unaligned data accesses when using memcpy() or memset()
                 * functions (normally seen with device type memory).
                 */
                core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
                                                        resource_size(res));
                if (!core->mem[i].cpu_addr) {
                        dev_err(dev, "failed to map %s memory\n", mem_names[i]);
                        return -ENOMEM;
                }
                core->mem[i].bus_addr = res->start;

                /*
                 * TODO:
                 * The R5F cores can place ATCM & BTCM anywhere in their
                 * address space based on the corresponding Region Registers
                 * in the System Control coprocessor. For now, place ATCM and
                 * BTCM at addresses 0 and 0x41010000 (same as the bus address
                 * on AM65x SoCs) based on the loczrama setting
                 */
                if (!strcmp(mem_names[i], "atcm")) {
                        core->mem[i].dev_addr = core->loczrama ?
                                                        0 : K3_R5_TCM_DEV_ADDR;
                } else {
                        core->mem[i].dev_addr = core->loczrama ?
                                                        K3_R5_TCM_DEV_ADDR : 0;
                }
                core->mem[i].size = resource_size(res);

                dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        mem_names[i], &core->mem[i].bus_addr,
                        core->mem[i].size, core->mem[i].cpu_addr,
                        core->mem[i].dev_addr);
        }
        core->num_mems = num_mems;

        return 0;
}

static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
                                           struct k3_r5_core *core)
{
        struct device_node *np = pdev->dev.of_node;
        struct device *dev = &pdev->dev;
        struct device_node *sram_np;
        struct resource res;
        int num_sram;
        int i, ret;

        num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
        if (num_sram <= 0) {
                dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
                        num_sram);
                return 0;
        }

        core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
        if (!core->sram)
                return -ENOMEM;

        for (i = 0; i < num_sram; i++) {
                sram_np = of_parse_phandle(np, "sram", i);
                if (!sram_np)
                        return -EINVAL;

                if (!of_device_is_available(sram_np)) {
                        of_node_put(sram_np);
                        return -EINVAL;
                }

                ret = of_address_to_resource(sram_np, 0, &res);
                of_node_put(sram_np);
                if (ret)
                        return -EINVAL;

                core->sram[i].bus_addr = res.start;
                core->sram[i].dev_addr = res.start;
                core->sram[i].size = resource_size(&res);
                core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
                                                         resource_size(&res));
                if (!core->sram[i].cpu_addr) {
                        dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
                                i, &res.start);
                        return -ENOMEM;
                }

                dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
                        i, &core->sram[i].bus_addr,
                        core->sram[i].size, core->sram[i].cpu_addr,
                        core->sram[i].dev_addr);
        }
        core->num_sram = num_sram;

        return 0;
}

static
struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
                                          const struct ti_sci_handle *sci)
{
        struct ti_sci_proc *tsp;
        u32 temp[2];
        int ret;

        ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
                                         temp, 2);
        if (ret < 0)
                return ERR_PTR(ret);

        tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
        if (!tsp)
                return ERR_PTR(-ENOMEM);

        tsp->dev = dev;
        tsp->sci = sci;
        tsp->ops = &sci->ops.proc_ops;
        tsp->proc_id = temp[0];
        tsp->host_id = temp[1];

        return tsp;
}

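/*
 * The "ti,sci-proc-ids" property parsed above holds two cells:
 * <proc_id host_id>. A hypothetical example with placeholder values
 * (real IDs are SoC-specific and defined by the TI-SCI firmware):
 *
 *      ti,sci-proc-ids = <0x01 0xff>;
 */
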
static int k3_r5_core_of_init(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev_of_node(dev);
        struct k3_r5_core *core;
        int ret;

        if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
                return -ENOMEM;

        core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
        if (!core) {
                ret = -ENOMEM;
                goto err;
        }

        core->dev = dev;
        /*
         * Use SoC Power-on-Reset values as default if no DT properties are
         * used to dictate the TCM configurations
         */
        core->atcm_enable = 0;
        core->btcm_enable = 1;
        core->loczrama = 1;

        ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
        if (ret < 0 && ret != -EINVAL) {
                dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
                        ret);
                goto err;
        }

        ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
        if (ret < 0 && ret != -EINVAL) {
                dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
                        ret);
                goto err;
        }

        ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
        if (ret < 0 && ret != -EINVAL) {
                dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
                goto err;
        }

        core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
        if (IS_ERR(core->ti_sci)) {
                ret = PTR_ERR(core->ti_sci);
                if (ret != -EPROBE_DEFER) {
                        dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
                                ret);
                }
                core->ti_sci = NULL;
                goto err;
        }

        ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
        if (ret) {
                dev_err(dev, "missing 'ti,sci-dev-id' property\n");
                goto err;
        }

        core->reset = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR_OR_NULL(core->reset)) {
                ret = PTR_ERR_OR_ZERO(core->reset);
                if (!ret)
                        ret = -ENODEV;
                if (ret != -EPROBE_DEFER) {
                        dev_err(dev, "failed to get reset handle, ret = %d\n",
                                ret);
                }
                goto err;
        }

        core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
        if (IS_ERR(core->tsp)) {
                ret = PTR_ERR(core->tsp);
                dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
                        ret);
                goto err;
        }

        ret = k3_r5_core_of_get_internal_memories(pdev, core);
        if (ret) {
                dev_err(dev, "failed to get internal memories, ret = %d\n",
                        ret);
                goto err;
        }

        ret = k3_r5_core_of_get_sram_memories(pdev, core);
        if (ret) {
                dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
                goto err;
        }

        ret = ti_sci_proc_request(core->tsp);
        if (ret < 0) {
                dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
                goto err;
        }

        platform_set_drvdata(pdev, core);
        devres_close_group(dev, k3_r5_core_of_init);

        return 0;

err:
        devres_release_group(dev, k3_r5_core_of_init);
        return ret;
}

/*
 * free the resources explicitly since driver model is not being used
 * for the child R5F devices
 */
static void k3_r5_core_of_exit(struct platform_device *pdev)
{
        struct k3_r5_core *core = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        int ret;

        ret = ti_sci_proc_release(core->tsp);
        if (ret)
                dev_err(dev, "failed to release proc, ret = %d\n", ret);

        platform_set_drvdata(pdev, NULL);
        devres_release_group(dev, k3_r5_core_of_init);
}

static void k3_r5_cluster_of_exit(struct platform_device *pdev)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
        struct platform_device *cpdev;
        struct k3_r5_core *core, *temp;

        list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
                list_del(&core->elem);
                cpdev = to_platform_device(core->dev);
                k3_r5_core_of_exit(cpdev);
        }
}

static int k3_r5_cluster_of_init(struct platform_device *pdev)
{
        struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        struct device_node *np = dev_of_node(dev);
        struct platform_device *cpdev;
        struct device_node *child;
        struct k3_r5_core *core;
        int ret;

        for_each_available_child_of_node(np, child) {
                cpdev = of_find_device_by_node(child);
                if (!cpdev) {
                        ret = -ENODEV;
                        dev_err(dev, "could not get R5 core platform device\n");
                        goto fail;
                }

                ret = k3_r5_core_of_init(cpdev);
                if (ret) {
                        dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
                                ret);
                        put_device(&cpdev->dev);
                        goto fail;
                }

                core = platform_get_drvdata(cpdev);
                put_device(&cpdev->dev);
                list_add_tail(&core->elem, &cluster->cores);
        }

        return 0;

fail:
        k3_r5_cluster_of_exit(pdev);
        return ret;
}

static int k3_r5_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev_of_node(dev);
        struct k3_r5_cluster *cluster;
        int ret;
        int num_cores;

        cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        cluster->dev = dev;
        cluster->mode = CLUSTER_MODE_LOCKSTEP;
        INIT_LIST_HEAD(&cluster->cores);

        ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
        if (ret < 0 && ret != -EINVAL) {
                dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
                        ret);
                return ret;
        }

        num_cores = of_get_available_child_count(np);
        if (num_cores != 2) {
                dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
                        num_cores);
                return -ENODEV;
        }

        platform_set_drvdata(pdev, cluster);

        ret = devm_of_platform_populate(dev);
        if (ret) {
                dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = k3_r5_cluster_of_init(pdev);
        if (ret) {
                dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
                return ret;
        }

        ret = devm_add_action_or_reset(dev,
                                       (void(*)(void *))k3_r5_cluster_of_exit,
                                       pdev);
        if (ret)
                return ret;

        ret = k3_r5_cluster_rproc_init(pdev);
        if (ret) {
                dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
                        ret);
                return ret;
        }

        ret = devm_add_action_or_reset(dev,
                                       (void(*)(void *))k3_r5_cluster_rproc_exit,
                                       pdev);
        if (ret)
                return ret;

        return 0;
}

static const struct of_device_id k3_r5_of_match[] = {
        { .compatible = "ti,am654-r5fss", },
        { .compatible = "ti,j721e-r5fss", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);

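/*
 * Illustrative device-tree fragment for an R5F cluster handled by this
 * driver, assembled from the properties parsed above. All node names,
 * unit addresses and ID values are hypothetical placeholders; consult
 * the SoC TRM and the R5F remoteproc DT binding for actual values.
 *
 *      mcu-r5fss0: r5fss@41000000 {
 *              compatible = "ti,am654-r5fss";
 *              ti,cluster-mode = <1>;          (1: LockStep, 0: Split)
 *
 *              r5f0: r5f@41000000 {
 *                      reg = <0x41000000 0x8000>, <0x41010000 0x8000>;
 *                      reg-names = "atcm", "btcm";
 *                      ti,sci = <&dmsc>;
 *                      ti,sci-dev-id = <159>;
 *                      ti,sci-proc-ids = <0x01 0xff>;
 *                      resets = <&k3_reset 159 1>;
 *                      firmware-name = "am65x-mcu-r5f0_0-fw";
 *                      ti,atcm-enable = <1>;
 *                      ti,btcm-enable = <1>;
 *                      ti,loczrama = <1>;
 *                      mboxes = <&mailbox0 &mbox_r5f0>;
 *                      memory-region = <&r5f0_dma_memory_region>,
 *                                      <&r5f0_memory_region>;
 *              };
 *              (second core node elided; the probe requires both cores
 *              to be enabled)
 *      };
 */
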
static struct platform_driver k3_r5_rproc_driver = {
        .probe = k3_r5_probe,
        .driver = {
                .name = "k3_r5_rproc",
                .of_match_table = k3_r5_of_match,
        },
};

module_platform_driver(k3_r5_rproc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");