uboot/drivers/remoteproc/ti_k3_r5f_rproc.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Texas Instruments' K3 R5 Remoteproc driver
   4 *
   5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
   6 *      Lokesh Vutla <lokeshvutla@ti.com>
   7 *      Suman Anna <s-anna@ti.com>
   8 */
   9
  10#include <common.h>
  11#include <dm.h>
  12#include <log.h>
  13#include <malloc.h>
  14#include <remoteproc.h>
  15#include <errno.h>
  16#include <clk.h>
  17#include <reset.h>
  18#include <asm/io.h>
  19#include <dm/device_compat.h>
  20#include <linux/err.h>
  21#include <linux/kernel.h>
  22#include <linux/soc/ti/ti_sci_protocol.h>
  23#include "ti_sci_proc.h"
  24
  25/*
  26 * The R5F core sees either the ATCM or the BTCM at this address, with the
  27 * other TCM mapped at address 0x0, depending on the loczrama signal.
  28 */
  29#define K3_R5_TCM_DEV_ADDR      0x41010000
  30
  31/* R5 TI-SCI Processor Configuration Flags */
  32#define PROC_BOOT_CFG_FLAG_R5_DBG_EN                    0x00000001
  33#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN                 0x00000002
  34#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP                  0x00000100
  35#define PROC_BOOT_CFG_FLAG_R5_TEINIT                    0x00000200
  36#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN                   0x00000400
  37#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE               0x00000800
  38#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN                   0x00001000
  39#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN                   0x00002000
  40#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR           0x10000000
  41/* Available from J7200 SoCs onwards */
  42#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS              0x00004000
  43
  44/* R5 TI-SCI Processor Control Flags */
  45#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT                0x00000001
  46
  47/* R5 TI-SCI Processor Status Flags */
  48#define PROC_BOOT_STATUS_FLAG_R5_WFE                    0x00000001
  49#define PROC_BOOT_STATUS_FLAG_R5_WFI                    0x00000002
  50#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED              0x00000004
  51#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED     0x00000100
  52
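    /* Number of R5F cores in a single R5FSS cluster */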
  53#define NR_CORES        2
  54
  55enum cluster_mode {
  56        CLUSTER_MODE_SPLIT = 0,
  57        CLUSTER_MODE_LOCKSTEP,
  58};
  59
  60/**
  61 * struct k3_r5f_ip_data - internal data structure used for IP variations
  62 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
  63 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
  64 */
  65struct k3_r5f_ip_data {
  66        bool tcm_is_double;
  67        bool tcm_ecc_autoinit;
  68};
  69
  70/**
  71 * struct k3_r5f_mem - internal memory structure
  72 * @cpu_addr: MPU virtual address of the memory region
  73 * @bus_addr: Bus address used to access the memory region
  74 * @dev_addr: Device address from remoteproc view
  75 * @size: Size of the memory region
  76 */
  77struct k3_r5f_mem {
  78        void __iomem *cpu_addr;
  79        phys_addr_t bus_addr;
  80        u32 dev_addr;
  81        size_t size;
  82};
  83
  84/**
  85 * struct k3_r5f_core - K3 R5 core structure
  86 * @dev: cached device pointer
  87 * @cluster: pointer to the parent cluster.
  88 * @reset: reset control handle
  89 * @tsp: TI-SCI processor control handle
  90 * @ipdata: cached pointer to R5F IP specific feature data
  91 * @mem: Array of available internal memories
  92 * @num_mems: Number of available memories
  93 * @atcm_enable: flag to control ATCM enablement
  94 * @btcm_enable: flag to control BTCM enablement
  95 * @loczrama: flag to dictate which TCM is at device address 0x0
  96 * @in_use: flag to tell if the core is already in use.
  97 */
  98struct k3_r5f_core {
  99        struct udevice *dev;
 100        struct k3_r5f_cluster *cluster;
 101        struct reset_ctl reset;
 102        struct ti_sci_proc tsp;
 103        struct k3_r5f_ip_data *ipdata;
 104        struct k3_r5f_mem *mem;
 105        int num_mems;
 106        u32 atcm_enable;
 107        u32 btcm_enable;
 108        u32 loczrama;
 109        bool in_use;
 110};
 111
 112/**
 113 * struct k3_r5f_cluster - K3 R5F Cluster structure
 114 * @mode: Mode to configure the Cluster - Split or LockStep
 115 * @cores: Array of pointers to R5 cores within the cluster
 116 */
 117struct k3_r5f_cluster {
 118        enum cluster_mode mode;
 119        struct k3_r5f_core *cores[NR_CORES];
 120};
 121
 122static bool is_primary_core(struct k3_r5f_core *core)
 123{
 124        return core == core->cluster->cores[0];
 125}
 126
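    /*
     * Request control of the R5F processor(s) from the TI-SCI firmware. In
     * lockstep mode both cores of the cluster are requested, since they are
     * configured and controlled as a single logical core.
     */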
 127static int k3_r5f_proc_request(struct k3_r5f_core *core)
 128{
 129        struct k3_r5f_cluster *cluster = core->cluster;
 130        int i, ret;
 131
 132        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
 133                for (i = 0; i < NR_CORES; i++) {
 134                        ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
 135                        if (ret)
 136                                goto proc_release;
 137                }
 138        } else {
 139                ret = ti_sci_proc_request(&core->tsp);
 140        }
 141
 142        return ret;
 143
 144proc_release:
 145        while (i >= 0) {
 146                ti_sci_proc_release(&cluster->cores[i]->tsp);
 147                i--;
 148        }
 149        return ret;
 150}
 151
 152static void k3_r5f_proc_release(struct k3_r5f_core *core)
 153{
 154        struct k3_r5f_cluster *cluster = core->cluster;
 155        int i;
 156
 157        if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
 158                for (i = 0; i < NR_CORES; i++)
 159                        ti_sci_proc_release(&cluster->cores[i]->tsp);
 160        else
 161                ti_sci_proc_release(&core->tsp);
 162}
 163
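    /*
     * Release the cluster from reset for lockstep operation: power on the
     * cores (core 1 first, then core 0) and only then deassert the local
     * resets, unrolling both steps if any of them fails.
     */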
 164static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
 165{
 166        int ret, c;
 167
 168        debug("%s\n", __func__);
 169
 170        for (c = NR_CORES - 1; c >= 0; c--) {
 171                ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
 172                if (ret)
 173                        goto unroll_module_reset;
 174        }
 175
 176        /* deassert local reset on all applicable cores */
 177        for (c = NR_CORES - 1; c >= 0; c--) {
 178                ret = reset_deassert(&cluster->cores[c]->reset);
 179                if (ret)
 180                        goto unroll_local_reset;
 181        }
 182
 183        return 0;
 184
 185unroll_local_reset:
 186        while (c < NR_CORES) {
 187                reset_assert(&cluster->cores[c]->reset);
 188                c++;
 189        }
 190        c = 0;
 191unroll_module_reset:
 192        while (c < NR_CORES) {
 193                ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
 194                c++;
 195        }
 196
 197        return ret;
 198}
 199
 200static int k3_r5f_split_release(struct k3_r5f_core *core)
 201{
 202        int ret;
 203
 204        dev_dbg(core->dev, "%s\n", __func__);
 205
 206        ret = ti_sci_proc_power_domain_on(&core->tsp);
 207        if (ret) {
 208                dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
 209                        ret);
 210                return ret;
 211        }
 212
 213        ret = reset_deassert(&core->reset);
 214        if (ret) {
 215                dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
 216                        ret);
 217                if (ti_sci_proc_power_domain_off(&core->tsp))
 218                        dev_warn(core->dev, "module-reset assert back failed\n");
 219        }
 220
 221        return ret;
 222}
 223
 224static int k3_r5f_prepare(struct udevice *dev)
 225{
 226        struct k3_r5f_core *core = dev_get_priv(dev);
 227        struct k3_r5f_cluster *cluster = core->cluster;
 228        int ret = 0;
 229
 230        dev_dbg(dev, "%s\n", __func__);
 231
 232        if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
 233                ret = k3_r5f_lockstep_release(cluster);
 234        else
 235                ret = k3_r5f_split_release(core);
 236
 237        if (ret)
 238                dev_err(dev, "Unable to enable cores for TCM loading %d\n",
 239                        ret);
 240
 241        return ret;
 242}
 243
 244static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
 245{
 246        struct k3_r5f_cluster *cluster = core->cluster;
 247
 248        if (core->in_use) {
 249                dev_err(core->dev,
 250                        "Invalid op: Trying to load/start on already running core %d\n",
 251                        core->tsp.proc_id);
 252                return -EINVAL;
 253        }
 254
 255        if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
 256                dev_err(core->dev,
 257                        "Secondary core is not probed in this cluster\n");
 258                return -EAGAIN;
 259        }
 260
 261        if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
 262                dev_err(core->dev,
 263                        "Invalid op: Trying to start secondary core %d in lockstep mode\n",
 264                        core->tsp.proc_id);
 265                return -EINVAL;
 266        }
 267
 268        if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
 269                if (!core->cluster->cores[0]->in_use) {
 270                        dev_err(core->dev,
 271                                "Invalid seq: Enable primary core before loading secondary core\n");
 272                        return -EINVAL;
 273                }
 274        }
 275
 276        return 0;
 277}
 278
 279/* Zero out TCMs so that ECC can be effective on all TCM addresses */
 280static void k3_r5f_init_tcm_memories(struct k3_r5f_core *core, bool auto_inited)
 281{
 282        if (core->ipdata->tcm_ecc_autoinit && auto_inited)
 283                return;
 284
 285        if (core->atcm_enable)
 286                memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
 287        if (core->btcm_enable)
 288                memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
 289}
 290
 291/**
 292 * k3_r5f_load() - Load up the Remote processor image
 293 * @dev:        rproc device pointer
 294 * @addr:       Address at which image is available
 295 * @size:       size of the image
 296 *
 297 * Return: 0 if all goes good, else appropriate error value.
 298 */
 299static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
 300{
 301        struct k3_r5f_core *core = dev_get_priv(dev);
 302        u64 boot_vector;
 303        u32 ctrl, sts, cfg = 0;
 304        bool mem_auto_init;
 305        int ret;
 306
 307        dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
 308
 309        ret = k3_r5f_core_sanity_check(core);
 310        if (ret)
 311                return ret;
 312
 313        ret = k3_r5f_proc_request(core);
 314        if (ret)
 315                return ret;
 316
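            /*
             * Read back the boot configuration to learn whether the device
             * auto-initializes the TCMs (MEM_INIT_DIS flag not set); if so,
             * the explicit zeroing in k3_r5f_init_tcm_memories() is skipped
             * on IP revisions with tcm_ecc_autoinit support.
             */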
 317        ret = ti_sci_proc_get_status(&core->tsp, &boot_vector, &cfg, &ctrl,
 318                                     &sts);
 319        if (ret)
 320                goto proc_release;
 321        mem_auto_init = !(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
 322
 323        ret = k3_r5f_prepare(dev);
 324        if (ret) {
 325                dev_err(dev, "R5f prepare failed for core %d\n",
 326                        core->tsp.proc_id);
 327                goto proc_release;
 328        }
 329
 330        k3_r5f_init_tcm_memories(core, mem_auto_init);
 331
 332        ret = rproc_elf_load_image(dev, addr, size);
 333        if (ret < 0) {
 334                dev_err(dev, "Loading elf failed %d\n", ret);
 335                goto proc_release;
 336        }
 337
 338        boot_vector = rproc_elf_get_boot_addr(dev, addr);
 339
 340        dev_dbg(dev, "%s: Boot vector = 0x%llx\n", __func__, boot_vector);
 341
 342        ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);
 343
 344proc_release:
 345        k3_r5f_proc_release(core);
 346
 347        return ret;
 348}
 349
 350static int k3_r5f_core_halt(struct k3_r5f_core *core)
 351{
 352        int ret;
 353
 354        ret = ti_sci_proc_set_control(&core->tsp,
 355                                      PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
 356        if (ret)
 357                dev_err(core->dev, "Core %d failed to stop\n",
 358                        core->tsp.proc_id);
 359
 360        return ret;
 361}
 362
 363static int k3_r5f_core_run(struct k3_r5f_core *core)
 364{
 365        int ret;
 366
 367        ret = ti_sci_proc_set_control(&core->tsp,
 368                                      0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
 369        if (ret) {
 370                dev_err(core->dev, "Core %d failed to start\n",
 371                        core->tsp.proc_id);
 372                return ret;
 373        }
 374
 375        return 0;
 376}
 377
 378/**
 379 * k3_r5f_start() - Start the remote processor
 380 * @dev:        rproc device pointer
 381 *
 382 * Return: 0 if all went ok, else return appropriate error
 383 */
 384static int k3_r5f_start(struct udevice *dev)
 385{
 386        struct k3_r5f_core *core = dev_get_priv(dev);
 387        struct k3_r5f_cluster *cluster = core->cluster;
 388        int ret, c;
 389
 390        dev_dbg(dev, "%s\n", __func__);
 391
 392        ret = k3_r5f_core_sanity_check(core);
 393        if (ret)
 394                return ret;
 395
 396        ret = k3_r5f_proc_request(core);
 397        if (ret)
 398                return ret;
 399
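            /*
             * In lockstep mode the cluster is started as one unit through the
             * primary core: run core 1 first, then core 0, halting the cores
             * again if any of them fails to start.
             */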
 400        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
 401                if (is_primary_core(core)) {
 402                        for (c = NR_CORES - 1; c >= 0; c--) {
 403                                ret = k3_r5f_core_run(cluster->cores[c]);
 404                                if (ret)
 405                                        goto unroll_core_run;
 406                        }
 407                } else {
 408                        dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
 409                                core->tsp.proc_id);
 410                        ret = -EINVAL;
 411                        goto proc_release;
 412                }
 413        } else {
 414                ret = k3_r5f_core_run(core);
 415                if (ret)
 416                        goto proc_release;
 417        }
 418
 419        core->in_use = true;
 420
 421        k3_r5f_proc_release(core);
 422        return 0;
 423
 424unroll_core_run:
 425        while (c < NR_CORES) {
 426                k3_r5f_core_halt(cluster->cores[c]);
 427                c++;
 428        }
 429proc_release:
 430        k3_r5f_proc_release(core);
 431
 432        return ret;
 433}
 434
 435static int k3_r5f_split_reset(struct k3_r5f_core *core)
 436{
 437        int ret = 0;
 438
 439        dev_dbg(core->dev, "%s\n", __func__);
 440
 441        if (reset_assert(&core->reset))
 442                ret = -EINVAL;
 443
 444        if (ti_sci_proc_power_domain_off(&core->tsp))
 445                ret = -EINVAL;
 446
 447        return ret;
 448}
 449
 450static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
 451{
 452        int ret = 0, c;
 453
 454        debug("%s\n", __func__);
 455
 456        for (c = 0; c < NR_CORES; c++)
 457                if (reset_assert(&cluster->cores[c]->reset))
 458                        ret = -EINVAL;
 459
 460        /* disable PSC modules on all applicable cores */
 461        for (c = 0; c < NR_CORES; c++)
 462                if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
 463                        ret = -EINVAL;
 464
 465        return ret;
 466}
 467
 468static int k3_r5f_unprepare(struct udevice *dev)
 469{
 470        struct k3_r5f_core *core = dev_get_priv(dev);
 471        struct k3_r5f_cluster *cluster = core->cluster;
 472        int ret = 0;
 473
 474        dev_dbg(dev, "%s\n", __func__);
 475
 476        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
 477                if (is_primary_core(core))
 478                        ret = k3_r5f_lockstep_reset(cluster);
 479        } else {
 480                ret = k3_r5f_split_reset(core);
 481        }
 482
 483        if (ret)
 484                dev_warn(dev, "Unable to shut down cores %d\n",
 485                         ret);
 486
 487        return 0;
 488}
 489
 490static int k3_r5f_stop(struct udevice *dev)
 491{
 492        struct k3_r5f_core *core = dev_get_priv(dev);
 493        struct k3_r5f_cluster *cluster = core->cluster;
 494        int c, ret;
 495
 496        dev_dbg(dev, "%s\n", __func__);
 497
 498        ret = k3_r5f_proc_request(core);
 499        if (ret)
 500                return ret;
 501
 502        core->in_use = false;
 503
 504        if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
 505                if (is_primary_core(core)) {
 506                        for (c = 0; c < NR_CORES; c++)
 507                                k3_r5f_core_halt(cluster->cores[c]);
 508                } else {
 509                        dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
 510                        ret = -EINVAL;
 511                        goto proc_release;
 512                }
 513        } else {
 514                k3_r5f_core_halt(core);
 515        }
 516
 517        ret = k3_r5f_unprepare(dev);
 518proc_release:
 519        k3_r5f_proc_release(core);
 520        return ret;
 521}
 522
 523static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
 524{
 525        struct k3_r5f_core *core = dev_get_priv(dev);
 526        void __iomem *va = NULL;
 527        phys_addr_t bus_addr;
 528        u32 dev_addr, offset;
 529        ulong mem_size;
 530        int i;
 531
 532        dev_dbg(dev, "%s\n", __func__);
 533
 534        if (size <= 0)
 535                return NULL;
 536
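            /*
             * Check whether the range falls within one of the core's internal
             * memories (TCMs), matching against both the SoC bus address and
             * the R5-local device address, and translate it to a
             * CPU-accessible pointer.
             */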
 537        for (i = 0; i < core->num_mems; i++) {
 538                bus_addr = core->mem[i].bus_addr;
 539                dev_addr = core->mem[i].dev_addr;
 540                mem_size = core->mem[i].size;
 541
 542                if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
 543                        offset = da - bus_addr;
 544                        va = core->mem[i].cpu_addr + offset;
 545                        return (__force void *)va;
 546                }
 547
 548                if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
 549                        offset = da - dev_addr;
 550                        va = core->mem[i].cpu_addr + offset;
 551                        return (__force void *)va;
 552                }
 553        }
 554
 555        /* Not an internal memory; assume it is a DDR region and map it 1:1 */
 556        return map_physmem(da, size, MAP_NOCACHE);
 557}
 558
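    /*
     * All required hardware setup is performed via TI-SCI in the load, start
     * and stop paths, so the generic init and reset hooks are no-ops here.
     */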
 559static int k3_r5f_init(struct udevice *dev)
 560{
 561        return 0;
 562}
 563
 564static int k3_r5f_reset(struct udevice *dev)
 565{
 566        return 0;
 567}
 568
 569static const struct dm_rproc_ops k3_r5f_rproc_ops = {
 570        .init = k3_r5f_init,
 571        .reset = k3_r5f_reset,
 572        .start = k3_r5f_start,
 573        .stop = k3_r5f_stop,
 574        .load = k3_r5f_load,
 575        .device_to_virt = k3_r5f_da_to_va,
 576};
 577
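    /*
     * One-time static configuration of an R5F core over TI-SCI: halt the
     * core and program the TCM enables, TCM boot base, lockstep and
     * boot-vector handling flags according to the device tree settings and
     * the cluster mode.
     */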
 578static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
 579{
 580        struct k3_r5f_cluster *cluster = core->cluster;
 581        u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
 582        bool lockstep_permitted;
 583        u64 boot_vec = 0;
 584        int ret;
 585
 586        dev_dbg(core->dev, "%s\n", __func__);
 587
 588        ret = ti_sci_proc_request(&core->tsp);
 589        if (ret < 0)
 590                return ret;
 591
 592        /* Do not touch boot vector now. Load will take care of it. */
 593        clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;
 594
 595        ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
 596        if (ret)
 597                goto out;
 598
 599        /* Sanity check for Lockstep mode */
 600        lockstep_permitted = !!(sts &
 601                                PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
 602        if (cluster->mode && is_primary_core(core) && !lockstep_permitted) {
 603                dev_err(core->dev, "LockStep mode not permitted on this device\n");
 604                ret = -EINVAL;
 605                goto out;
 606        }
 607
 608        /* Primary core only configuration */
 609        if (is_primary_core(core)) {
 610                /* always enable ARM mode */
 611                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
 612                if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
 613                        set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
 614                else if (lockstep_permitted)
 615                        clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
 616        }
 617
 618        if (core->atcm_enable)
 619                set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
 620        else
 621                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
 622
 623        if (core->btcm_enable)
 624                set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
 625        else
 626                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
 627
 628        if (core->loczrama)
 629                set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
 630        else
 631                clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
 632
 633        ret = k3_r5f_core_halt(core);
 634        if (ret)
 635                goto out;
 636
 637        ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
 638out:
 639        ti_sci_proc_release(&core->tsp);
 640        return ret;
 641}
 642
 643static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
 644{
 645        u32 ids[2];
 646        int ret;
 647
 648        dev_dbg(dev, "%s\n", __func__);
 649
 650        tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
 651        if (IS_ERR(tsp->sci)) {
 652                dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
 653                return PTR_ERR(tsp->sci);
 654        }
 655
 656        ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
 657        if (ret) {
 658                dev_err(dev, "Proc IDs not populated %d\n", ret);
 659                return ret;
 660        }
 661
 662        tsp->ops = &tsp->sci->ops.proc_ops;
 663        tsp->proc_id = ids[0];
 664        tsp->host_id = ids[1];
 665        tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
 666                                           TI_SCI_RESOURCE_NULL);
 667        if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
 668                dev_err(dev, "Device ID not populated\n");
 669                return -ENODEV;
 670        }
 671
 672        return 0;
 673}
 674
 675static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
 676{
 677        int ret;
 678
 679        dev_dbg(core->dev, "%s\n", __func__);
 680
 681        core->atcm_enable = dev_read_u32_default(core->dev, "ti,atcm-enable", 0);
 682        core->btcm_enable = dev_read_u32_default(core->dev, "ti,btcm-enable", 1);
 683        core->loczrama = dev_read_u32_default(core->dev, "ti,loczrama", 1);
 684
 685        ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
 686        if (ret)
 687                return ret;
 688
 689        ret = reset_get_by_index(core->dev, 0, &core->reset);
 690        if (ret) {
 691                dev_err(core->dev, "Reset lines not available: %d\n", ret);
 692                return ret;
 693        }
 694
 695        core->ipdata = (struct k3_r5f_ip_data *)dev_get_driver_data(core->dev);
 696
 697        return 0;
 698}
 699
 700static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
 701{
 702        static const char * const mem_names[] = {"atcm", "btcm"};
 703        struct udevice *dev = core->dev;
 704        int i;
 705
 706        dev_dbg(dev, "%s\n", __func__);
 707
 708        core->num_mems = ARRAY_SIZE(mem_names);
 709        core->mem = calloc(core->num_mems, sizeof(*core->mem));
 710        if (!core->mem)
 711                return -ENOMEM;
 712
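            /*
             * Each core has two internal memories, ATCM and BTCM. The
             * loczrama setting selects which of the two appears at R5 device
             * address 0x0; the other one sits at K3_R5_TCM_DEV_ADDR.
             */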
 713        for (i = 0; i < core->num_mems; i++) {
 714                core->mem[i].bus_addr = dev_read_addr_size_name(dev,
 715                                                                mem_names[i],
 716                                        (fdt_addr_t *)&core->mem[i].size);
 717                if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
 718                        dev_err(dev, "%s bus address not found\n",
 719                                mem_names[i]);
 720                        return -EINVAL;
 721                }
 722                core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
 723                                                    core->mem[i].size,
 724                                                    MAP_NOCACHE);
 725                if (!strcmp(mem_names[i], "atcm")) {
 726                        core->mem[i].dev_addr = core->loczrama ?
 727                                                        0 : K3_R5_TCM_DEV_ADDR;
 728                } else {
 729                        core->mem[i].dev_addr = core->loczrama ?
 730                                                        K3_R5_TCM_DEV_ADDR : 0;
 731                }
 732
 733                dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
 734                        mem_names[i], &core->mem[i].bus_addr,
 735                        core->mem[i].size, core->mem[i].cpu_addr,
 736                        core->mem[i].dev_addr);
 737        }
 738
 739        return 0;
 740}
 741
 742/*
 743 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
 744 * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
 745 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
 747 * LockStep-mode. The newer revisions of the R5FSS IP maximize these TCMs by
 747 * leveraging the Core1 TCMs as well in certain modes where they would have
 748 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
 749 * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
 750 * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
 751 * dts representation reflects this increased size on supported SoCs. The Core0
 752 * TCM sizes therefore have to be adjusted to only half the original size in
 753 * Split mode.
 754 */
 755static void k3_r5f_core_adjust_tcm_sizes(struct k3_r5f_core *core)
 756{
 757        struct k3_r5f_cluster *cluster = core->cluster;
 758
 759        if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
 760                return;
 761
 762        if (!core->ipdata->tcm_is_double)
 763                return;
 764
 765        if (core == cluster->cores[0]) {
 766                core->mem[0].size /= 2;
 767                core->mem[1].size /= 2;
 768
 769                dev_dbg(core->dev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
 770                        core->mem[0].size, core->mem[1].size);
 771        }
 772}
 773
 774/**
 775 * k3_r5f_probe() - Basic probe
 776 * @dev:        corresponding k3 remote processor device
 777 *
 778 * Return: 0 if all goes good, else appropriate error value.
 779 */
 780static int k3_r5f_probe(struct udevice *dev)
 781{
 782        struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
 783        struct k3_r5f_core *core = dev_get_priv(dev);
 784        bool r_state;
 785        int ret;
 786
 787        dev_dbg(dev, "%s\n", __func__);
 788
 789        core->dev = dev;
 790        ret = k3_r5f_of_to_priv(core);
 791        if (ret)
 792                return ret;
 793
 794        core->cluster = cluster;
 795        /* Assume Primary core gets probed first */
 796        if (!cluster->cores[0])
 797                cluster->cores[0] = core;
 798        else
 799                cluster->cores[1] = core;
 800
 801        ret = k3_r5f_core_of_get_memories(core);
 802        if (ret) {
 803                dev_err(dev, "Failed to get rproc internal memories\n");
 804                return ret;
 805        }
 806
 807        /*
 808         * The PM functionality is not supported by the firmware during
 809         * SPL execution with the separated DM firmware image. The following
 810         * piece of code is not compiled in that case.
 811         */
 812        if (!IS_ENABLED(CONFIG_K3_DM_FW)) {
 813                ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci,
 814                                                       core->tsp.dev_id,
 815                                                       &r_state, &core->in_use);
 816                if (ret)
 817                        return ret;
 818
 819                if (core->in_use) {
 820                        dev_info(dev, "Core %d is already in use. No rproc commands work\n",
 821                                 core->tsp.proc_id);
 822                        return 0;
 823                }
 824
 825                /* Make sure the local reset is asserted; harmless if it already is */
 826                reset_assert(&core->reset);
 827        }
 828
 829        ret = k3_r5f_rproc_configure(core);
 830        if (ret) {
 831                dev_err(dev, "rproc configure failed %d\n", ret);
 832                return ret;
 833        }
 834
 835        k3_r5f_core_adjust_tcm_sizes(core);
 836
 837        dev_dbg(dev, "Remoteproc successfully probed\n");
 838
 839        return 0;
 840}
 841
 842static int k3_r5f_remove(struct udevice *dev)
 843{
 844        struct k3_r5f_core *core = dev_get_priv(dev);
 845
 846        free(core->mem);
 847
 848        ti_sci_proc_release(&core->tsp);
 849
 850        return 0;
 851}
 852
 853static const struct k3_r5f_ip_data k3_data = {
 854        .tcm_is_double = false,
 855        .tcm_ecc_autoinit = false,
 856};
 857
 858static const struct k3_r5f_ip_data j7200_data = {
 859        .tcm_is_double = true,
 860        .tcm_ecc_autoinit = true,
 861};
 862
 863static const struct udevice_id k3_r5f_rproc_ids[] = {
 864        { .compatible = "ti,am654-r5f", .data = (ulong)&k3_data, },
 865        { .compatible = "ti,j721e-r5f", .data = (ulong)&k3_data, },
 866        { .compatible = "ti,j7200-r5f", .data = (ulong)&j7200_data, },
 867        {}
 868};
 869
 870U_BOOT_DRIVER(k3_r5f_rproc) = {
 871        .name = "k3_r5f_rproc",
 872        .of_match = k3_r5f_rproc_ids,
 873        .id = UCLASS_REMOTEPROC,
 874        .ops = &k3_r5f_rproc_ops,
 875        .probe = k3_r5f_probe,
 876        .remove = k3_r5f_remove,
 877        .priv_auto      = sizeof(struct k3_r5f_core),
 878};
 879
 880static int k3_r5f_cluster_probe(struct udevice *dev)
 881{
 882        struct k3_r5f_cluster *cluster = dev_get_priv(dev);
 883
 884        dev_dbg(dev, "%s\n", __func__);
 885
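            /* Default to lockstep mode when "ti,cluster-mode" is absent */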
 886        cluster->mode = dev_read_u32_default(dev, "ti,cluster-mode",
 887                                             CLUSTER_MODE_LOCKSTEP);
 888
 889        if (device_get_child_count(dev) != NR_CORES) {
 890                dev_err(dev, "Invalid number of R5 cores\n");
 891                return -EINVAL;
 892        }
 893
 894        dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
 895                __func__, cluster->mode ? "lockstep" : "split");
 896
 897        return 0;
 898}
 899
 900static const struct udevice_id k3_r5fss_ids[] = {
 901        { .compatible = "ti,am654-r5fss"},
 902        { .compatible = "ti,j721e-r5fss"},
 903        { .compatible = "ti,j7200-r5fss"},
 904        {}
 905};
 906
 907U_BOOT_DRIVER(k3_r5fss) = {
 908        .name = "k3_r5fss",
 909        .of_match = k3_r5fss_ids,
 910        .id = UCLASS_MISC,
 911        .probe = k3_r5f_cluster_probe,
 912        .priv_auto      = sizeof(struct k3_r5f_cluster),
 913        .flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,
 914};
 915
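    /*
     * Illustrative device tree fragment showing the properties consumed by
     * this driver (node names, addresses, sizes and ID values below are
     * placeholders, not taken from any particular board):
     *
     *    r5fss0: r5fss@41000000 {
     *            compatible = "ti,am654-r5fss";
     *            ti,cluster-mode = <1>;
     *
     *            r5f0: r5f@41000000 {
     *                    compatible = "ti,am654-r5f";
     *                    reg = <0x41000000 0x8000>, <0x41010000 0x8000>;
     *                    reg-names = "atcm", "btcm";
     *                    ti,sci = <&dmsc>;
     *                    ti,sci-dev-id = <159>;
     *                    ti,sci-proc-ids = <0x01 0xff>;
     *                    resets = <&k3_reset 159 1>;
     *                    ti,atcm-enable = <1>;
     *                    ti,btcm-enable = <1>;
     *                    ti,loczrama = <1>;
     *            };
     *    };
     */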