uboot/drivers/firmware/ti_sci.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Texas Instruments System Control Interface Protocol Driver
   4 * Based on drivers/firmware/ti_sci.c from Linux.
   5 *
   6 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
   7 *      Lokesh Vutla <lokeshvutla@ti.com>
   8 */
   9
  10#include <common.h>
  11#include <dm.h>
  12#include <errno.h>
  13#include <log.h>
  14#include <mailbox.h>
  15#include <malloc.h>
  16#include <dm/device.h>
  17#include <dm/device_compat.h>
  18#include <dm/devres.h>
  19#include <linux/bitops.h>
  20#include <linux/compat.h>
  21#include <linux/err.h>
  22#include <linux/soc/ti/k3-sec-proxy.h>
  23#include <linux/soc/ti/ti_sci_protocol.h>
  24
  25#include "ti_sci.h"
  26#include "ti_sci_static_data.h"
  27
  28/* List of all TI SCI devices active in system */
  29static LIST_HEAD(ti_sci_list);
  30
  31/**
  32 * struct ti_sci_xfer - Structure representing a message flow
  33 * @tx_message: Transmit message
  34 * @rx_len:     Receive message length
  35 */
  36struct ti_sci_xfer {
  37        struct k3_sec_proxy_msg tx_message;
  38        u8 rx_len;
  39};
  40
  41/**
   42 * struct ti_sci_rm_type_map - Structure mapping a TISCI device ID to its
   43 *                              TISCI resource management (RM) type.
   44 * @dev_id:     TISCI device ID
   45 * @type:       Corresponding ID as identified by TISCI RM.
   46 *
   47 * Note: This is used only as a workaround for using RM range APIs
   48 *      for the AM654 SoC. For future SoCs, dev_id will be used as the type
   49 *      for RM range APIs. In order to maintain ABI backward compatibility,
   50 *      the type is not being changed for the AM654 SoC.
  51 */
  52struct ti_sci_rm_type_map {
  53        u32 dev_id;
  54        u16 type;
  55};
  56
  57/**
  58 * struct ti_sci_desc - Description of SoC integration
  59 * @default_host_id:    Host identifier representing the compute entity
  60 * @max_rx_timeout_ms:  Timeout for communication with SoC (in Milliseconds)
  61 * @max_msgs: Maximum number of messages that can be pending
  62 *                simultaneously in the system
  63 * @max_msg_size: Maximum size of data per message that can be handled.
  64 */
  65struct ti_sci_desc {
  66        u8 default_host_id;
  67        int max_rx_timeout_ms;
  68        int max_msgs;
  69        int max_msg_size;
  70};
  71
  72/**
  73 * struct ti_sci_info - Structure representing a TI SCI instance
  74 * @dev:        Device pointer
  75 * @desc:       SoC description for this instance
  76 * @handle:     Instance of TI SCI handle to send to clients.
  77 * @chan_tx:    Transmit mailbox channel
  78 * @chan_rx:    Receive mailbox channel
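 * @chan_notify: Notification mailbox channel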
  79 * @xfer:       xfer info
  80 * @list:       list head
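 * @dev_list:   List of devices requested with exclusive access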
  81 * @is_secure:  Determines if the communication is through secure threads.
  82 * @host_id:    Host identifier representing the compute entity
   83 * @seq:        Sequence ID used to match responses to their requests.
  84 */
  85struct ti_sci_info {
  86        struct udevice *dev;
  87        const struct ti_sci_desc *desc;
  88        struct ti_sci_handle handle;
  89        struct mbox_chan chan_tx;
  90        struct mbox_chan chan_rx;
  91        struct mbox_chan chan_notify;
  92        struct ti_sci_xfer xfer;
  93        struct list_head list;
  94        struct list_head dev_list;
  95        bool is_secure;
  96        u8 host_id;
  97        u8 seq;
  98};
  99
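/**
 * struct ti_sci_exclusive_dev - Bookkeeping for a device requested with
 *                               exclusive access
 * @id:         TISCI device identifier
 * @count:      Number of outstanding exclusive requests for this device
 * @list:       Link in the ti_sci_info dev_list
 */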
 100struct ti_sci_exclusive_dev {
 101        u32 id;
 102        u32 count;
 103        struct list_head list;
 104};
 105
 106#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
 107
 108/**
 109 * ti_sci_setup_one_xfer() - Setup one message type
 110 * @info:       Pointer to SCI entity information
 111 * @msg_type:   Message type
 112 * @msg_flags:  Flag to set for the message
  113 * @buf:        Buffer to be sent to the mailbox channel
  114 * @tx_message_size: transmit message size
  115 * @rx_message_size: receive message size. May be set to zero for send-only
  116 *                   transactions.
 117 *
 118 * Helper function which is used by various command functions that are
 119 * exposed to clients of this driver for allocating a message traffic event.
 120 *
 121 * Return: Corresponding ti_sci_xfer pointer if all went fine,
 122 *         else appropriate error pointer.
 123 */
 124static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
 125                                                 u16 msg_type, u32 msg_flags,
 126                                                 u32 *buf,
 127                                                 size_t tx_message_size,
 128                                                 size_t rx_message_size)
 129{
 130        struct ti_sci_xfer *xfer = &info->xfer;
 131        struct ti_sci_msg_hdr *hdr;
 132
 133        /* Ensure we have sane transfer sizes */
 134        if (rx_message_size > info->desc->max_msg_size ||
 135            tx_message_size > info->desc->max_msg_size ||
 136            (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
 137            tx_message_size < sizeof(*hdr))
 138                return ERR_PTR(-ERANGE);
 139
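        /*
         * Flip the sequence number so the response to this message can be
         * matched against info->seq in ti_sci_get_response().
         */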
 140        info->seq = ~info->seq;
 141        xfer->tx_message.buf = buf;
 142        xfer->tx_message.len = tx_message_size;
 143        xfer->rx_len = (u8)rx_message_size;
 144
 145        hdr = (struct ti_sci_msg_hdr *)buf;
 146        hdr->seq = info->seq;
 147        hdr->type = msg_type;
 148        hdr->host = info->host_id;
 149        hdr->flags = msg_flags;
 150
 151        return xfer;
 152}
 153
 154/**
 155 * ti_sci_get_response() - Receive response from mailbox channel
 156 * @info:       Pointer to SCI entity information
 157 * @xfer:       Transfer to initiate and wait for response
 158 * @chan:       Channel to receive the response
 159 *
  160 * Return: -ETIMEDOUT in case of no response, the corresponding
  161 *         error code if receiving the message fails, else 0 if
  162 *         all goes well.
 163 */
 164static inline int ti_sci_get_response(struct ti_sci_info *info,
 165                                      struct ti_sci_xfer *xfer,
 166                                      struct mbox_chan *chan)
 167{
 168        struct k3_sec_proxy_msg *msg = &xfer->tx_message;
 169        struct ti_sci_secure_msg_hdr *secure_hdr;
 170        struct ti_sci_msg_hdr *hdr;
 171        int ret;
 172
 173        /* Receive the response */
 174        ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
 175        if (ret) {
 176                dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
 177                        __func__, ret);
 178                return ret;
 179        }
 180
 181        /* ToDo: Verify checksum */
 182        if (info->is_secure) {
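                /* Skip the secure header prepended in front of the response */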
 183                secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
 184                msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
 185        }
 186
 187        /* msg is updated by mailbox driver */
 188        hdr = (struct ti_sci_msg_hdr *)msg->buf;
 189
 190        /* Sanity check for message response */
 191        if (hdr->seq != info->seq) {
 192                dev_dbg(info->dev, "%s: Message for %d is not expected\n",
 193                        __func__, hdr->seq);
 194                return ret;
 195        }
 196
 197        if (msg->len > info->desc->max_msg_size) {
 198                dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
 199                        __func__, msg->len, info->desc->max_msg_size);
 200                return -EINVAL;
 201        }
 202
 203        if (msg->len < xfer->rx_len) {
 204                dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
 205                        __func__, msg->len, xfer->rx_len);
 206        }
 207
 208        return ret;
 209}
 210
 211/**
 212 * ti_sci_do_xfer() - Do one transfer
 213 * @info:       Pointer to SCI entity information
 214 * @xfer:       Transfer to initiate and wait for response
 215 *
 216 * Return: 0 if all went fine, else return appropriate error.
 217 */
 218static inline int ti_sci_do_xfer(struct ti_sci_info *info,
 219                                 struct ti_sci_xfer *xfer)
 220{
 221        struct k3_sec_proxy_msg *msg = &xfer->tx_message;
 222        u8 secure_buf[info->desc->max_msg_size];
 223        struct ti_sci_secure_msg_hdr secure_hdr;
 224        int ret;
 225
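        /*
         * For secure transport the outgoing payload is staged in secure_buf
         * behind room reserved for a ti_sci_secure_msg_hdr, and the response
         * is expected to carry such a header in front as well, so both the
         * transmit and the expected receive lengths grow by its size.
         */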
 226        if (info->is_secure) {
 227                /* ToDo: get checksum of the entire message */
 228                secure_hdr.checksum = 0;
 229                secure_hdr.reserved = 0;
 230                memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
 231                       xfer->tx_message.len);
 232
 233                xfer->tx_message.buf = (u32 *)secure_buf;
 234                xfer->tx_message.len += sizeof(secure_hdr);
 235
 236                if (xfer->rx_len)
 237                        xfer->rx_len += sizeof(secure_hdr);
 238        }
 239
 240        /* Send the message */
 241        ret = mbox_send(&info->chan_tx, msg);
 242        if (ret) {
 243                dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
 244                        __func__, ret);
 245                return ret;
 246        }
 247
 248        /* Get response if requested */
 249        if (xfer->rx_len)
 250                ret = ti_sci_get_response(info, xfer, &info->chan_rx);
 251
 252        return ret;
 253}
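
/*
 * Illustrative sketch (not part of the driver): the command helpers below all
 * follow the same pattern around ti_sci_setup_one_xfer() and ti_sci_do_xfer().
 * Assuming a request structure "req" and a response header pointer "resp" for
 * some hypothetical message type TI_SCI_MSG_EXAMPLE:
 *
 *      xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_EXAMPLE,
 *                                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *                                   (u32 *)&req, sizeof(req), sizeof(*resp));
 *      if (IS_ERR(xfer))
 *              return PTR_ERR(xfer);
 *      req.some_field = some_value;
 *      ret = ti_sci_do_xfer(info, xfer);
 *      if (ret)
 *              return ret;
 *      resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
 *      if (!ti_sci_is_response_ack(resp))
 *              return -ENODEV;
 */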
 254
 255/**
 256 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 257 * @handle:     pointer to TI SCI handle
 258 *
 259 * Updates the SCI information in the internal data structure.
 260 *
 261 * Return: 0 if all went fine, else return appropriate error.
 262 */
 263static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
 264{
 265        struct ti_sci_msg_resp_version *rev_info;
 266        struct ti_sci_version_info *ver;
 267        struct ti_sci_msg_hdr hdr;
 268        struct ti_sci_info *info;
 269        struct ti_sci_xfer *xfer;
 270        int ret;
 271
 272        if (IS_ERR(handle))
 273                return PTR_ERR(handle);
 274        if (!handle)
 275                return -EINVAL;
 276
 277        info = handle_to_ti_sci_info(handle);
 278
 279        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
 280                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 281                                     (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
 282                                     sizeof(*rev_info));
 283        if (IS_ERR(xfer)) {
 284                ret = PTR_ERR(xfer);
 285                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
 286                return ret;
 287        }
 288
 289        ret = ti_sci_do_xfer(info, xfer);
 290        if (ret) {
 291                dev_err(info->dev, "Mbox communication fail %d\n", ret);
 292                return ret;
 293        }
 294
 295        rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
 296
 297        ver = &handle->version;
 298        ver->abi_major = rev_info->abi_major;
 299        ver->abi_minor = rev_info->abi_minor;
 300        ver->firmware_revision = rev_info->firmware_revision;
 301        strncpy(ver->firmware_description, rev_info->firmware_description,
 302                sizeof(ver->firmware_description));
 303
 304        return 0;
 305}
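
/*
 * Illustrative sketch (assumption, not part of the driver): once
 * ti_sci_cmd_get_revision() has succeeded, a client holding the handle can
 * simply read the cached version information, e.g.:
 *
 *      printf("SYSFW ABI %u.%u (firmware rev 0x%04x '%s')\n",
 *             handle->version.abi_major, handle->version.abi_minor,
 *             handle->version.firmware_revision,
 *             handle->version.firmware_description);
 */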
 306
 307/**
 308 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 309 * @r:  pointer to response buffer
 310 *
 311 * Return: true if the response was an ACK, else returns false.
 312 */
 313static inline bool ti_sci_is_response_ack(void *r)
 314{
 315        struct ti_sci_msg_hdr *hdr = r;
 316
 317        return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
 318}
 319
 320/**
 321 * cmd_set_board_config_using_msg() - Common command to send board configuration
 322 *                                    message
 323 * @handle:     pointer to TI SCI handle
 324 * @msg_type:   One of the TISCI message types to set board configuration
 325 * @addr:       Address where the board config structure is located
 326 * @size:       Size of the board config structure
 327 *
 328 * Return: 0 if all went well, else returns appropriate error value.
 329 */
 330static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
 331                                          u16 msg_type, u64 addr, u32 size)
 332{
 333        struct ti_sci_msg_board_config req;
 334        struct ti_sci_msg_hdr *resp;
 335        struct ti_sci_info *info;
 336        struct ti_sci_xfer *xfer;
 337        int ret = 0;
 338
 339        if (IS_ERR(handle))
 340                return PTR_ERR(handle);
 341        if (!handle)
 342                return -EINVAL;
 343
 344        info = handle_to_ti_sci_info(handle);
 345
 346        xfer = ti_sci_setup_one_xfer(info, msg_type,
 347                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 348                                     (u32 *)&req, sizeof(req), sizeof(*resp));
 349        if (IS_ERR(xfer)) {
 350                ret = PTR_ERR(xfer);
 351                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
 352                return ret;
 353        }
 354        req.boardcfgp_high = (addr >> 32) & 0xffffffff;
 355        req.boardcfgp_low = addr & 0xffffffff;
 356        req.boardcfg_size = size;
 357
 358        ret = ti_sci_do_xfer(info, xfer);
 359        if (ret) {
 360                dev_err(info->dev, "Mbox send fail %d\n", ret);
 361                return ret;
 362        }
 363
 364        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
 365
 366        if (!ti_sci_is_response_ack(resp))
 367                return -ENODEV;
 368
 369        return ret;
 370}
 371
 372/**
 373 * ti_sci_cmd_set_board_config() - Command to send board configuration message
 374 * @handle:     pointer to TI SCI handle
 375 * @addr:       Address where the board config structure is located
 376 * @size:       Size of the board config structure
 377 *
 378 * Return: 0 if all went well, else returns appropriate error value.
 379 */
 380static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
 381                                       u64 addr, u32 size)
 382{
 383        return cmd_set_board_config_using_msg(handle,
 384                                              TI_SCI_MSG_BOARD_CONFIG,
 385                                              addr, size);
 386}
 387
 388/**
 389 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
 390 *                                    management configuration
 391 * @handle:     pointer to TI SCI handle
 392 * @addr:       Address where the board RM config structure is located
 393 * @size:       Size of the RM config structure
 394 *
 395 * Return: 0 if all went well, else returns appropriate error value.
 396 */
 397static
 398int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
 399                                   u64 addr, u32 size)
 400{
 401        return cmd_set_board_config_using_msg(handle,
 402                                              TI_SCI_MSG_BOARD_CONFIG_RM,
 403                                              addr, size);
 404}
 405
 406/**
 407 * ti_sci_cmd_set_board_config_security() - Command to send board security
 408 *                                          configuration message
 409 * @handle:     pointer to TI SCI handle
 410 * @addr:       Address where the board security config structure is located
 411 * @size:       Size of the security config structure
 412 *
 413 * Return: 0 if all went well, else returns appropriate error value.
 414 */
 415static
 416int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
 417                                         u64 addr, u32 size)
 418{
 419        return cmd_set_board_config_using_msg(handle,
 420                                              TI_SCI_MSG_BOARD_CONFIG_SECURITY,
 421                                              addr, size);
 422}
 423
 424/**
 425 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
 426 *                                    configuration message
 427 * @handle:     pointer to TI SCI handle
 428 * @addr:       Address where the board PM config structure is located
 429 * @size:       Size of the PM config structure
 430 *
 431 * Return: 0 if all went well, else returns appropriate error value.
 432 */
 433static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
 434                                          u64 addr, u32 size)
 435{
 436        return cmd_set_board_config_using_msg(handle,
 437                                              TI_SCI_MSG_BOARD_CONFIG_PM,
 438                                              addr, size);
 439}
 440
 441static struct ti_sci_exclusive_dev
 442*ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
 443{
 444        struct ti_sci_exclusive_dev *dev;
 445
 446        list_for_each_entry(dev, dev_list, list)
 447                if (dev->id == id)
 448                        return dev;
 449
 450        return NULL;
 451}
 452
 453static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
 454{
 455        struct ti_sci_exclusive_dev *dev;
 456
 457        dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
 458        if (dev) {
 459                dev->count++;
 460                return;
 461        }
 462
  463        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return;
  464        dev->id = id;
 465        dev->count = 1;
 466        INIT_LIST_HEAD(&dev->list);
 467        list_add_tail(&dev->list, &info->dev_list);
 468}
 469
 470static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
 471{
 472        struct ti_sci_exclusive_dev *dev;
 473
 474        dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
 475        if (!dev)
 476                return;
 477
 478        if (dev->count > 0)
 479                dev->count--;
 480}
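
/*
 * The two helpers above keep a per-device count of outstanding exclusive
 * requests: ti_sci_set_device_state() adds to the count when a device is
 * requested with MSG_FLAG_DEVICE_EXCLUSIVE and drops it when the device is
 * handed back with MSG_DEVICE_SW_STATE_AUTO_OFF, which lets
 * ti_sci_cmd_release_exclusive_devices() later put back any devices that
 * were never released explicitly.
 */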
 481
 482/**
 483 * ti_sci_set_device_state() - Set device state helper
 484 * @handle:     pointer to TI SCI handle
 485 * @id:         Device identifier
 486 * @flags:      flags to setup for the device
 487 * @state:      State to move the device to
 488 *
 489 * Return: 0 if all went well, else returns appropriate error value.
 490 */
 491static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
 492                                   u32 id, u32 flags, u8 state)
 493{
 494        struct ti_sci_msg_req_set_device_state req;
 495        struct ti_sci_msg_hdr *resp;
 496        struct ti_sci_info *info;
 497        struct ti_sci_xfer *xfer;
 498        int ret = 0;
 499
 500        if (IS_ERR(handle))
 501                return PTR_ERR(handle);
 502        if (!handle)
 503                return -EINVAL;
 504
 505        info = handle_to_ti_sci_info(handle);
 506
 507        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
 508                                     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 509                                     (u32 *)&req, sizeof(req), sizeof(*resp));
 510        if (IS_ERR(xfer)) {
 511                ret = PTR_ERR(xfer);
 512                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
 513                return ret;
 514        }
 515        req.id = id;
 516        req.state = state;
 517
 518        ret = ti_sci_do_xfer(info, xfer);
 519        if (ret) {
 520                dev_err(info->dev, "Mbox send fail %d\n", ret);
 521                return ret;
 522        }
 523
 524        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
 525
 526        if (!ti_sci_is_response_ack(resp))
 527                return -ENODEV;
 528
 529        if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
 530                ti_sci_delete_exclusive_dev(info, id);
 531        else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
 532                ti_sci_add_exclusive_dev(info, id);
 533
 534        return ret;
 535}
 536
 537/**
 538 * ti_sci_set_device_state_no_wait() - Set device state helper without
 539 *                                     requesting or waiting for a response.
 540 * @handle:     pointer to TI SCI handle
 541 * @id:         Device identifier
 542 * @flags:      flags to setup for the device
 543 * @state:      State to move the device to
 544 *
 545 * Return: 0 if all went well, else returns appropriate error value.
 546 */
 547static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
 548                                           u32 id, u32 flags, u8 state)
 549{
 550        struct ti_sci_msg_req_set_device_state req;
 551        struct ti_sci_info *info;
 552        struct ti_sci_xfer *xfer;
 553        int ret = 0;
 554
 555        if (IS_ERR(handle))
 556                return PTR_ERR(handle);
 557        if (!handle)
 558                return -EINVAL;
 559
 560        info = handle_to_ti_sci_info(handle);
 561
 562        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
 563                                     flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
 564                                     (u32 *)&req, sizeof(req), 0);
 565        if (IS_ERR(xfer)) {
 566                ret = PTR_ERR(xfer);
 567                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
 568                return ret;
 569        }
 570        req.id = id;
 571        req.state = state;
 572
 573        ret = ti_sci_do_xfer(info, xfer);
 574        if (ret)
 575                dev_err(info->dev, "Mbox send fail %d\n", ret);
 576
 577        return ret;
 578}
 579
 580/**
 581 * ti_sci_get_device_state() - Get device state helper
  582 * @handle:     Pointer to TI SCI handle
 583 * @id:         Device Identifier
 584 * @clcnt:      Pointer to Context Loss Count
 585 * @resets:     pointer to resets
 586 * @p_state:    pointer to p_state
 587 * @c_state:    pointer to c_state
 588 *
 589 * Return: 0 if all went fine, else return appropriate error.
 590 */
 591static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
 592                                   u32 id,  u32 *clcnt,  u32 *resets,
 593                                   u8 *p_state,  u8 *c_state)
 594{
 595        struct ti_sci_msg_resp_get_device_state *resp;
 596        struct ti_sci_msg_req_get_device_state req;
 597        struct ti_sci_info *info;
 598        struct ti_sci_xfer *xfer;
 599        int ret = 0;
 600
 601        if (IS_ERR(handle))
 602                return PTR_ERR(handle);
 603        if (!handle)
 604                return -EINVAL;
 605
 606        if (!clcnt && !resets && !p_state && !c_state)
 607                return -EINVAL;
 608
 609        info = handle_to_ti_sci_info(handle);
 610
 611        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
 612                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 613                                     (u32 *)&req, sizeof(req), sizeof(*resp));
 614        if (IS_ERR(xfer)) {
 615                ret = PTR_ERR(xfer);
 616                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
 617                return ret;
 618        }
 619        req.id = id;
 620
 621        ret = ti_sci_do_xfer(info, xfer);
 622        if (ret) {
 623                dev_err(info->dev, "Mbox send fail %d\n", ret);
 624                return ret;
 625        }
 626
 627        resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
 628        if (!ti_sci_is_response_ack(resp))
 629                return -ENODEV;
 630
 631        if (clcnt)
 632                *clcnt = resp->context_loss_count;
 633        if (resets)
 634                *resets = resp->resets;
 635        if (p_state)
 636                *p_state = resp->programmed_state;
 637        if (c_state)
 638                *c_state = resp->current_state;
 639
 640        return ret;
 641}
 642
 643/**
 644 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 645 * @handle:     Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 646 * @id:         Device Identifier
 647 *
 648 * Request for the device - NOTE: the client MUST maintain integrity of
 649 * usage count by balancing get_device with put_device. No refcounting is
 650 * managed by driver for that purpose.
 651 *
 652 * NOTE: The request is for exclusive access for the processor.
 653 *
 654 * Return: 0 if all went fine, else return appropriate error.
 655 */
 656static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
 657{
 658        return ti_sci_set_device_state(handle, id, 0,
 659                                       MSG_DEVICE_SW_STATE_ON);
 660}
 661
 662static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
 663                                           u32 id)
 664{
 665        return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
 666                                       MSG_DEVICE_SW_STATE_ON);
 667}
 668
 669/**
 670 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 671 * @handle:     Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 672 * @id:         Device Identifier
 673 *
 674 * Request for the device - NOTE: the client MUST maintain integrity of
 675 * usage count by balancing get_device with put_device. No refcounting is
 676 * managed by driver for that purpose.
 677 *
 678 * Return: 0 if all went fine, else return appropriate error.
 679 */
 680static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
 681{
 682        return ti_sci_set_device_state(handle, id,
 683                                       0,
 684                                       MSG_DEVICE_SW_STATE_RETENTION);
 685}
 686
 687static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
 688                                            u32 id)
 689{
 690        return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
 691                                       MSG_DEVICE_SW_STATE_RETENTION);
 692}
 693
 694/**
 695 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 696 * @handle:     Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 697 * @id:         Device Identifier
 698 *
 699 * Request for the device - NOTE: the client MUST maintain integrity of
 700 * usage count by balancing get_device with put_device. No refcounting is
 701 * managed by driver for that purpose.
 702 *
 703 * Return: 0 if all went fine, else return appropriate error.
 704 */
 705static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
 706{
 707        return ti_sci_set_device_state(handle, id, 0,
 708                                       MSG_DEVICE_SW_STATE_AUTO_OFF);
 709}
 710
 711static
 712int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle)
 713{
 714        struct ti_sci_exclusive_dev *dev, *tmp;
 715        struct ti_sci_info *info;
 716        int i, cnt;
 717
 718        info = handle_to_ti_sci_info(handle);
 719
 720        list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
 721                cnt = dev->count;
 722                debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
 723                for (i = 0; i < cnt; i++)
 724                        ti_sci_cmd_put_device(handle, dev->id);
 725        }
 726
 727        return 0;
 728}
 729
 730/**
 731 * ti_sci_cmd_dev_is_valid() - Is the device valid
 732 * @handle:     Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 733 * @id:         Device Identifier
 734 *
 735 * Return: 0 if all went fine and the device ID is valid, else return
 736 * appropriate error.
 737 */
 738static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
 739{
 740        u8 unused;
 741
 742        /* check the device state which will also tell us if the ID is valid */
 743        return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
 744}
 745
 746/**
 747 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 748 * @handle:     Pointer to TISCI handle
 749 * @id:         Device Identifier
 750 * @count:      Pointer to Context Loss counter to populate
 751 *
 752 * Return: 0 if all went fine, else return appropriate error.
 753 */
 754static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
 755                                    u32 *count)
 756{
 757        return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
 758}
 759
 760/**
 761 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 762 * @handle:     Pointer to TISCI handle
 763 * @id:         Device Identifier
 764 * @r_state:    true if requested to be idle
 765 *
 766 * Return: 0 if all went fine, else return appropriate error.
 767 */
 768static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
 769                                  bool *r_state)
 770{
 771        int ret;
 772        u8 state;
 773
 774        if (!r_state)
 775                return -EINVAL;
 776
 777        ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
 778        if (ret)
 779                return ret;
 780
 781        *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
 782
 783        return 0;
 784}
 785
 786/**
 787 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 788 * @handle:     Pointer to TISCI handle
 789 * @id:         Device Identifier
 790 * @r_state:    true if requested to be stopped
 791 * @curr_state: true if currently stopped.
 792 *
 793 * Return: 0 if all went fine, else return appropriate error.
 794 */
 795static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
 796                                  bool *r_state,  bool *curr_state)
 797{
 798        int ret;
 799        u8 p_state, c_state;
 800
 801        if (!r_state && !curr_state)
 802                return -EINVAL;
 803
 804        ret =
 805            ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
 806        if (ret)
 807                return ret;
 808
 809        if (r_state)
 810                *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
 811        if (curr_state)
 812                *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
 813
 814        return 0;
 815}
 816
 817/**
 818 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 819 * @handle:     Pointer to TISCI handle
 820 * @id:         Device Identifier
 821 * @r_state:    true if requested to be ON
 822 * @curr_state: true if currently ON and active
 823 *
 824 * Return: 0 if all went fine, else return appropriate error.
 825 */
 826static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
 827                                bool *r_state,  bool *curr_state)
 828{
 829        int ret;
 830        u8 p_state, c_state;
 831
 832        if (!r_state && !curr_state)
 833                return -EINVAL;
 834
 835        ret =
 836            ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
 837        if (ret)
 838                return ret;
 839
 840        if (r_state)
 841                *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
 842        if (curr_state)
 843                *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
 844
 845        return 0;
 846}
 847
 848/**
 849 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 850 * @handle:     Pointer to TISCI handle
 851 * @id:         Device Identifier
 852 * @curr_state: true if currently transitioning.
 853 *
 854 * Return: 0 if all went fine, else return appropriate error.
 855 */
 856static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
 857                                   bool *curr_state)
 858{
 859        int ret;
 860        u8 state;
 861
 862        if (!curr_state)
 863                return -EINVAL;
 864
 865        ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
 866        if (ret)
 867                return ret;
 868
 869        *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
 870
 871        return 0;
 872}
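
/*
 * Note: the ti_sci_cmd_dev_is_*() helpers above are thin wrappers around
 * ti_sci_get_device_state(): r_state is derived from the programmed
 * (software-requested) state, while curr_state reflects the hardware state
 * reported by the firmware.
 */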
 873
 874/**
 875 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 876 *                                  by TISCI
 877 * @handle:     Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 878 * @id:         Device Identifier
 879 * @reset_state: Device specific reset bit field
 880 *
 881 * Return: 0 if all went fine, else return appropriate error.
 882 */
 883static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
 884                                        u32 id, u32 reset_state)
 885{
 886        struct ti_sci_msg_req_set_device_resets req;
 887        struct ti_sci_msg_hdr *resp;
 888        struct ti_sci_info *info;
 889        struct ti_sci_xfer *xfer;
 890        int ret = 0;
 891
 892        if (IS_ERR(handle))
 893                return PTR_ERR(handle);
 894        if (!handle)
 895                return -EINVAL;
 896
 897        info = handle_to_ti_sci_info(handle);
 898
 899        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
 900                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 901                                     (u32 *)&req, sizeof(req), sizeof(*resp));
 902        if (IS_ERR(xfer)) {
 903                ret = PTR_ERR(xfer);
 904                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
 905                return ret;
 906        }
 907        req.id = id;
 908        req.resets = reset_state;
 909
 910        ret = ti_sci_do_xfer(info, xfer);
 911        if (ret) {
 912                dev_err(info->dev, "Mbox send fail %d\n", ret);
 913                return ret;
 914        }
 915
 916        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
 917
 918        if (!ti_sci_is_response_ack(resp))
 919                return -ENODEV;
 920
 921        return ret;
 922}
 923
 924/**
 925 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 926 *                                  by TISCI
 927 * @handle:             Pointer to TISCI handle
 928 * @id:                 Device Identifier
 929 * @reset_state:        Pointer to reset state to populate
 930 *
 931 * Return: 0 if all went fine, else return appropriate error.
 932 */
 933static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
 934                                        u32 id, u32 *reset_state)
 935{
 936        return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
 937                                       NULL);
 938}
 939
 940/**
 941 * ti_sci_set_clock_state() - Set clock state helper
 942 * @handle:     pointer to TI SCI handle
 943 * @dev_id:     Device identifier this request is for
 944 * @clk_id:     Clock identifier for the device for this request.
  945 *              Each device has its own set of clock inputs. This indexes
 946 *              which clock input to modify.
 947 * @flags:      Header flags as needed
 948 * @state:      State to request for the clock.
 949 *
 950 * Return: 0 if all went well, else returns appropriate error value.
 951 */
 952static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
 953                                  u32 dev_id, u8 clk_id,
 954                                  u32 flags, u8 state)
 955{
 956        struct ti_sci_msg_req_set_clock_state req;
 957        struct ti_sci_msg_hdr *resp;
 958        struct ti_sci_info *info;
 959        struct ti_sci_xfer *xfer;
 960        int ret = 0;
 961
 962        if (IS_ERR(handle))
 963                return PTR_ERR(handle);
 964        if (!handle)
 965                return -EINVAL;
 966
 967        info = handle_to_ti_sci_info(handle);
 968
 969        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
 970                                     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 971                                     (u32 *)&req, sizeof(req), sizeof(*resp));
 972        if (IS_ERR(xfer)) {
 973                ret = PTR_ERR(xfer);
 974                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
 975                return ret;
 976        }
 977        req.dev_id = dev_id;
 978        req.clk_id = clk_id;
 979        req.request_state = state;
 980
 981        ret = ti_sci_do_xfer(info, xfer);
 982        if (ret) {
 983                dev_err(info->dev, "Mbox send fail %d\n", ret);
 984                return ret;
 985        }
 986
 987        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
 988
 989        if (!ti_sci_is_response_ack(resp))
 990                return -ENODEV;
 991
 992        return ret;
 993}
 994
 995/**
 996 * ti_sci_cmd_get_clock_state() - Get clock state helper
 997 * @handle:     pointer to TI SCI handle
 998 * @dev_id:     Device identifier this request is for
 999 * @clk_id:     Clock identifier for the device for this request.
 1000 *              Each device has its own set of clock inputs. This indexes
1001 *              which clock input to modify.
1002 * @programmed_state:   State requested for clock to move to
1003 * @current_state:      State that the clock is currently in
1004 *
1005 * Return: 0 if all went well, else returns appropriate error value.
1006 */
1007static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
1008                                      u32 dev_id, u8 clk_id,
1009                                      u8 *programmed_state, u8 *current_state)
1010{
1011        struct ti_sci_msg_resp_get_clock_state *resp;
1012        struct ti_sci_msg_req_get_clock_state req;
1013        struct ti_sci_info *info;
1014        struct ti_sci_xfer *xfer;
1015        int ret = 0;
1016
1017        if (IS_ERR(handle))
1018                return PTR_ERR(handle);
1019        if (!handle)
1020                return -EINVAL;
1021
1022        if (!programmed_state && !current_state)
1023                return -EINVAL;
1024
1025        info = handle_to_ti_sci_info(handle);
1026
1027        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1028                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1029                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1030        if (IS_ERR(xfer)) {
1031                ret = PTR_ERR(xfer);
1032                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1033                return ret;
1034        }
1035        req.dev_id = dev_id;
1036        req.clk_id = clk_id;
1037
1038        ret = ti_sci_do_xfer(info, xfer);
1039        if (ret) {
1040                dev_err(info->dev, "Mbox send fail %d\n", ret);
1041                return ret;
1042        }
1043
1044        resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
1045
1046        if (!ti_sci_is_response_ack(resp))
1047                return -ENODEV;
1048
1049        if (programmed_state)
1050                *programmed_state = resp->programmed_state;
1051        if (current_state)
1052                *current_state = resp->current_state;
1053
1054        return ret;
1055}
1056
1057/**
1058 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
1059 * @handle:     pointer to TI SCI handle
1060 * @dev_id:     Device identifier this request is for
1061 * @clk_id:     Clock identifier for the device for this request.
 1062 *              Each device has its own set of clock inputs. This indexes
1063 *              which clock input to modify.
1064 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
1065 * @can_change_freq: 'true' if frequency change is desired, else 'false'
1066 * @enable_input_term: 'true' if input termination is desired, else 'false'
1067 *
1068 * Return: 0 if all went well, else returns appropriate error value.
1069 */
1070static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1071                                u8 clk_id, bool needs_ssc, bool can_change_freq,
1072                                bool enable_input_term)
1073{
1074        u32 flags = 0;
1075
1076        flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1077        flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1078        flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1079
1080        return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1081                                      MSG_CLOCK_SW_STATE_REQ);
1082}
1083
1084/**
1085 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
1086 * @handle:     pointer to TI SCI handle
1087 * @dev_id:     Device identifier this request is for
1088 * @clk_id:     Clock identifier for the device for this request.
 1089 *              Each device has its own set of clock inputs. This indexes
1090 *              which clock input to modify.
1091 *
1092 * NOTE: This clock must have been requested by get_clock previously.
1093 *
1094 * Return: 0 if all went well, else returns appropriate error value.
1095 */
1096static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1097                                 u32 dev_id, u8 clk_id)
1098{
1099        return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1100                                      MSG_CLOCK_SW_STATE_UNREQ);
1101}
1102
1103/**
1104 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
1105 * @handle:     pointer to TI SCI handle
1106 * @dev_id:     Device identifier this request is for
1107 * @clk_id:     Clock identifier for the device for this request.
 1108 *              Each device has its own set of clock inputs. This indexes
1109 *              which clock input to modify.
1110 *
1111 * NOTE: This clock must have been requested by get_clock previously.
1112 *
1113 * Return: 0 if all went well, else returns appropriate error value.
1114 */
1115static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1116                                u32 dev_id, u8 clk_id)
1117{
1118        return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1119                                      MSG_CLOCK_SW_STATE_AUTO);
1120}
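
/*
 * Illustrative sketch (assumption, not part of the driver): a client that
 * needs direct control of a device clock typically brackets its use with the
 * helpers in this file, which are normally reached through the operations
 * installed on the ti_sci_handle:
 *
 *      ret = ti_sci_cmd_get_clock(handle, dev_id, clk_id, false, true, false);
 *      ...
 *      ret = ti_sci_cmd_clk_set_freq(handle, dev_id, clk_id, freq, freq, freq);
 *      ...
 *      ti_sci_cmd_put_clock(handle, dev_id, clk_id);
 */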
1121
1122/**
1123 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
1124 * @handle:     pointer to TI SCI handle
1125 * @dev_id:     Device identifier this request is for
1126 * @clk_id:     Clock identifier for the device for this request.
 1127 *              Each device has its own set of clock inputs. This indexes
1128 *              which clock input to modify.
1129 * @req_state: state indicating if the clock is auto managed
1130 *
1131 * Return: 0 if all went well, else returns appropriate error value.
1132 */
1133static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1134                                  u32 dev_id, u8 clk_id, bool *req_state)
1135{
1136        u8 state = 0;
1137        int ret;
1138
1139        if (!req_state)
1140                return -EINVAL;
1141
1142        ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1143        if (ret)
1144                return ret;
1145
1146        *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1147        return 0;
1148}
1149
1150/**
1151 * ti_sci_cmd_clk_is_on() - Is the clock ON
1152 * @handle:     pointer to TI SCI handle
1153 * @dev_id:     Device identifier this request is for
1154 * @clk_id:     Clock identifier for the device for this request.
 1155 *              Each device has its own set of clock inputs. This indexes
1156 *              which clock input to modify.
1157 * @req_state: state indicating if the clock is managed by us and enabled
1158 * @curr_state: state indicating if the clock is ready for operation
1159 *
1160 * Return: 0 if all went well, else returns appropriate error value.
1161 */
1162static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1163                                u8 clk_id, bool *req_state, bool *curr_state)
1164{
1165        u8 c_state = 0, r_state = 0;
1166        int ret;
1167
1168        if (!req_state && !curr_state)
1169                return -EINVAL;
1170
1171        ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1172                                         &r_state, &c_state);
1173        if (ret)
1174                return ret;
1175
1176        if (req_state)
1177                *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1178        if (curr_state)
1179                *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1180        return 0;
1181}
1182
1183/**
1184 * ti_sci_cmd_clk_is_off() - Is the clock OFF
1185 * @handle:     pointer to TI SCI handle
1186 * @dev_id:     Device identifier this request is for
1187 * @clk_id:     Clock identifier for the device for this request.
 1188 *              Each device has its own set of clock inputs. This indexes
1189 *              which clock input to modify.
1190 * @req_state: state indicating if the clock is managed by us and disabled
1191 * @curr_state: state indicating if the clock is NOT ready for operation
1192 *
1193 * Return: 0 if all went well, else returns appropriate error value.
1194 */
1195static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1196                                 u8 clk_id, bool *req_state, bool *curr_state)
1197{
1198        u8 c_state = 0, r_state = 0;
1199        int ret;
1200
1201        if (!req_state && !curr_state)
1202                return -EINVAL;
1203
1204        ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1205                                         &r_state, &c_state);
1206        if (ret)
1207                return ret;
1208
1209        if (req_state)
1210                *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1211        if (curr_state)
1212                *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1213        return 0;
1214}
1215
1216/**
1217 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1218 * @handle:     pointer to TI SCI handle
1219 * @dev_id:     Device identifier this request is for
1220 * @clk_id:     Clock identifier for the device for this request.
 1221 *              Each device has its own set of clock inputs. This indexes
1222 *              which clock input to modify.
1223 * @parent_id:  Parent clock identifier to set
1224 *
1225 * Return: 0 if all went well, else returns appropriate error value.
1226 */
1227static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1228                                     u32 dev_id, u8 clk_id, u8 parent_id)
1229{
1230        struct ti_sci_msg_req_set_clock_parent req;
1231        struct ti_sci_msg_hdr *resp;
1232        struct ti_sci_info *info;
1233        struct ti_sci_xfer *xfer;
1234        int ret = 0;
1235
1236        if (IS_ERR(handle))
1237                return PTR_ERR(handle);
1238        if (!handle)
1239                return -EINVAL;
1240
1241        info = handle_to_ti_sci_info(handle);
1242
1243        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1244                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1245                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1246        if (IS_ERR(xfer)) {
1247                ret = PTR_ERR(xfer);
1248                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1249                return ret;
1250        }
1251        req.dev_id = dev_id;
1252        req.clk_id = clk_id;
1253        req.parent_id = parent_id;
1254
1255        ret = ti_sci_do_xfer(info, xfer);
1256        if (ret) {
1257                dev_err(info->dev, "Mbox send fail %d\n", ret);
1258                return ret;
1259        }
1260
1261        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1262
1263        if (!ti_sci_is_response_ack(resp))
1264                return -ENODEV;
1265
1266        return ret;
1267}
1268
1269/**
1270 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1271 * @handle:     pointer to TI SCI handle
1272 * @dev_id:     Device identifier this request is for
1273 * @clk_id:     Clock identifier for the device for this request.
 1274 *              Each device has its own set of clock inputs. This indexes
1275 *              which clock input to modify.
1276 * @parent_id:  Current clock parent
1277 *
1278 * Return: 0 if all went well, else returns appropriate error value.
1279 */
1280static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1281                                     u32 dev_id, u8 clk_id, u8 *parent_id)
1282{
1283        struct ti_sci_msg_resp_get_clock_parent *resp;
1284        struct ti_sci_msg_req_get_clock_parent req;
1285        struct ti_sci_info *info;
1286        struct ti_sci_xfer *xfer;
1287        int ret = 0;
1288
1289        if (IS_ERR(handle))
1290                return PTR_ERR(handle);
1291        if (!handle || !parent_id)
1292                return -EINVAL;
1293
1294        info = handle_to_ti_sci_info(handle);
1295
1296        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1297                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1298                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1299        if (IS_ERR(xfer)) {
1300                ret = PTR_ERR(xfer);
1301                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1302                return ret;
1303        }
1304        req.dev_id = dev_id;
1305        req.clk_id = clk_id;
1306
1307        ret = ti_sci_do_xfer(info, xfer);
1308        if (ret) {
1309                dev_err(info->dev, "Mbox send fail %d\n", ret);
1310                return ret;
1311        }
1312
1313        resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1314
1315        if (!ti_sci_is_response_ack(resp))
1316                ret = -ENODEV;
1317        else
1318                *parent_id = resp->parent_id;
1319
1320        return ret;
1321}
1322
1323/**
1324 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1325 * @handle:     pointer to TI SCI handle
1326 * @dev_id:     Device identifier this request is for
1327 * @clk_id:     Clock identifier for the device for this request.
 1328 *              Each device has its own set of clock inputs. This indexes
1329 *              which clock input to modify.
 1330 * @num_parents: Returns the number of parents of the current clock.
1331 *
1332 * Return: 0 if all went well, else returns appropriate error value.
1333 */
1334static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1335                                          u32 dev_id, u8 clk_id,
1336                                          u8 *num_parents)
1337{
1338        struct ti_sci_msg_resp_get_clock_num_parents *resp;
1339        struct ti_sci_msg_req_get_clock_num_parents req;
1340        struct ti_sci_info *info;
1341        struct ti_sci_xfer *xfer;
1342        int ret = 0;
1343
1344        if (IS_ERR(handle))
1345                return PTR_ERR(handle);
1346        if (!handle || !num_parents)
1347                return -EINVAL;
1348
1349        info = handle_to_ti_sci_info(handle);
1350
1351        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1352                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1353                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1354        if (IS_ERR(xfer)) {
1355                ret = PTR_ERR(xfer);
1356                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1357                return ret;
1358        }
1359        req.dev_id = dev_id;
1360        req.clk_id = clk_id;
1361
1362        ret = ti_sci_do_xfer(info, xfer);
1363        if (ret) {
1364                dev_err(info->dev, "Mbox send fail %d\n", ret);
1365                return ret;
1366        }
1367
1368        resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1369                                                        xfer->tx_message.buf;
1370
1371        if (!ti_sci_is_response_ack(resp))
1372                ret = -ENODEV;
1373        else
1374                *num_parents = resp->num_parents;
1375
1376        return ret;
1377}
1378
1379/**
1380 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1381 * @handle:     pointer to TI SCI handle
1382 * @dev_id:     Device identifier this request is for
1383 * @clk_id:     Clock identifier for the device for this request.
 1384 *              Each device has its own set of clock inputs. This indexes
1385 *              which clock input to modify.
1386 * @min_freq:   The minimum allowable frequency in Hz. This is the minimum
1387 *              allowable programmed frequency and does not account for clock
1388 *              tolerances and jitter.
1389 * @target_freq: The target clock frequency in Hz. A frequency will be
1390 *              processed as close to this target frequency as possible.
1391 * @max_freq:   The maximum allowable frequency in Hz. This is the maximum
1392 *              allowable programmed frequency and does not account for clock
1393 *              tolerances and jitter.
1394 * @match_freq: Frequency match in Hz response.
1395 *
1396 * Return: 0 if all went well, else returns appropriate error value.
1397 */
1398static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1399                                         u32 dev_id, u8 clk_id, u64 min_freq,
1400                                         u64 target_freq, u64 max_freq,
1401                                         u64 *match_freq)
1402{
1403        struct ti_sci_msg_resp_query_clock_freq *resp;
1404        struct ti_sci_msg_req_query_clock_freq req;
1405        struct ti_sci_info *info;
1406        struct ti_sci_xfer *xfer;
1407        int ret = 0;
1408
1409        if (IS_ERR(handle))
1410                return PTR_ERR(handle);
1411        if (!handle || !match_freq)
1412                return -EINVAL;
1413
1414        info = handle_to_ti_sci_info(handle);
1415
1416        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1417                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1418                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1419        if (IS_ERR(xfer)) {
1420                ret = PTR_ERR(xfer);
1421                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1422                return ret;
1423        }
1424        req.dev_id = dev_id;
1425        req.clk_id = clk_id;
1426        req.min_freq_hz = min_freq;
1427        req.target_freq_hz = target_freq;
1428        req.max_freq_hz = max_freq;
1429
1430        ret = ti_sci_do_xfer(info, xfer);
1431        if (ret) {
1432                dev_err(info->dev, "Mbox send fail %d\n", ret);
1433                return ret;
1434        }
1435
1436        resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1437
1438        if (!ti_sci_is_response_ack(resp))
1439                ret = -ENODEV;
1440        else
1441                *match_freq = resp->freq_hz;
1442
1443        return ret;
1444}
1445
1446/**
1447 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1448 * @handle:     pointer to TI SCI handle
1449 * @dev_id:     Device identifier this request is for
1450 * @clk_id:     Clock identifier for the device for this request.
 1451 *              Each device has its own set of clock inputs. This indexes
1452 *              which clock input to modify.
1453 * @min_freq:   The minimum allowable frequency in Hz. This is the minimum
1454 *              allowable programmed frequency and does not account for clock
1455 *              tolerances and jitter.
1456 * @target_freq: The target clock frequency in Hz. A frequency will be
1457 *              processed as close to this target frequency as possible.
1458 * @max_freq:   The maximum allowable frequency in Hz. This is the maximum
1459 *              allowable programmed frequency and does not account for clock
1460 *              tolerances and jitter.
1461 *
1462 * Return: 0 if all went well, else returns appropriate error value.
1463 */
1464static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1465                                   u32 dev_id, u8 clk_id, u64 min_freq,
1466                                   u64 target_freq, u64 max_freq)
1467{
1468        struct ti_sci_msg_req_set_clock_freq req;
1469        struct ti_sci_msg_hdr *resp;
1470        struct ti_sci_info *info;
1471        struct ti_sci_xfer *xfer;
1472        int ret = 0;
1473
1474        if (IS_ERR(handle))
1475                return PTR_ERR(handle);
1476        if (!handle)
1477                return -EINVAL;
1478
1479        info = handle_to_ti_sci_info(handle);
1480
1481        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1482                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1483                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1484        if (IS_ERR(xfer)) {
1485                ret = PTR_ERR(xfer);
1486                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1487                return ret;
1488        }
1489        req.dev_id = dev_id;
1490        req.clk_id = clk_id;
1491        req.min_freq_hz = min_freq;
1492        req.target_freq_hz = target_freq;
1493        req.max_freq_hz = max_freq;
1494
1495        ret = ti_sci_do_xfer(info, xfer);
1496        if (ret) {
1497                dev_err(info->dev, "Mbox send fail %d\n", ret);
1498                return ret;
1499        }
1500
1501        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1502
1503        if (!ti_sci_is_response_ack(resp))
1504                return -ENODEV;
1505
1506        return ret;
1507}
1508
1509/**
1510 * ti_sci_cmd_clk_get_freq() - Get current frequency
1511 * @handle:     pointer to TI SCI handle
1512 * @dev_id:     Device identifier this request is for
1513 * @clk_id:     Clock identifier for the device for this request.
1514 *              Each device has its own set of clock inputs. This indexes
1515 *              which clock input to query.
1516 * @freq:       Current frequency in Hz
1517 *
1518 * Return: 0 if all went well, else returns appropriate error value.
1519 */
1520static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1521                                   u32 dev_id, u8 clk_id, u64 *freq)
1522{
1523        struct ti_sci_msg_resp_get_clock_freq *resp;
1524        struct ti_sci_msg_req_get_clock_freq req;
1525        struct ti_sci_info *info;
1526        struct ti_sci_xfer *xfer;
1527        int ret = 0;
1528
1529        if (IS_ERR(handle))
1530                return PTR_ERR(handle);
1531        if (!handle || !freq)
1532                return -EINVAL;
1533
1534        info = handle_to_ti_sci_info(handle);
1535
1536        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1537                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1538                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1539        if (IS_ERR(xfer)) {
1540                ret = PTR_ERR(xfer);
1541                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1542                return ret;
1543        }
1544        req.dev_id = dev_id;
1545        req.clk_id = clk_id;
1546
1547        ret = ti_sci_do_xfer(info, xfer);
1548        if (ret) {
1549                dev_err(info->dev, "Mbox send fail %d\n", ret);
1550                return ret;
1551        }
1552
1553        resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1554
1555        if (!ti_sci_is_response_ack(resp))
1556                ret = -ENODEV;
1557        else
1558                *freq = resp->freq_hz;
1559
1560        return ret;
1561}
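
    /*
     * Example (illustrative sketch, not part of the driver): a client that
     * already holds a TI SCI handle could program and read back a clock
     * through the clk_ops installed by ti_sci_setup_ops() below. The device
     * ID, clock ID and frequencies used here are placeholders.
     *
     *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
     *	u64 freq;
     *	int ret;
     *
     *	ret = cops->set_freq(handle, 82, 2, 96000000, 96000000, 96000000);
     *	if (!ret)
     *		ret = cops->get_freq(handle, 82, 2, &freq);
     */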
1562
1563/**
1564 * ti_sci_cmd_core_reboot() - Command to request system reset
1565 * @handle:     pointer to TI SCI handle
1566 *
1567 * Return: 0 if all went well, else returns appropriate error value.
1568 */
1569static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1570{
1571        struct ti_sci_msg_req_reboot req;
1572        struct ti_sci_msg_hdr *resp;
1573        struct ti_sci_info *info;
1574        struct ti_sci_xfer *xfer;
1575        int ret = 0;
1576
1577        if (IS_ERR(handle))
1578                return PTR_ERR(handle);
1579        if (!handle)
1580                return -EINVAL;
1581
1582        info = handle_to_ti_sci_info(handle);
1583
1584        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1585                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1586                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1587        if (IS_ERR(xfer)) {
1588                ret = PTR_ERR(xfer);
1589                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1590                return ret;
1591        }
1592        req.domain = 0;
1593
1594        ret = ti_sci_do_xfer(info, xfer);
1595        if (ret) {
1596                dev_err(info->dev, "Mbox send fail %d\n", ret);
1597                return ret;
1598        }
1599
1600        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1601
1602        if (!ti_sci_is_response_ack(resp))
1603                return -ENODEV;
1604
1605        return ret;
1606}
1607
1608/**
1609 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1610 *                             to a host. Resource is uniquely identified by
1611 *                             type and subtype.
1612 * @handle:             Pointer to TISCI handle.
1613 * @dev_id:             TISCI device ID.
1614 * @subtype:            Resource assignment subtype that is being requested
1615 *                      from the given device.
1616 * @s_host:             Host processor ID to which the resources are allocated
1617 * @range_start:        Start index of the resource range
1618 * @range_num:          Number of resources in the range
1619 *
1620 * Return: 0 if all went fine, else return appropriate error.
1621 */
1622static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1623                                     u32 dev_id, u8 subtype, u8 s_host,
1624                                     u16 *range_start, u16 *range_num)
1625{
1626        struct ti_sci_msg_resp_get_resource_range *resp;
1627        struct ti_sci_msg_req_get_resource_range req;
1628        struct ti_sci_xfer *xfer;
1629        struct ti_sci_info *info;
1630        int ret = 0;
1631
1632        if (IS_ERR(handle))
1633                return PTR_ERR(handle);
1634        if (!handle)
1635                return -EINVAL;
1636
1637        info = handle_to_ti_sci_info(handle);
1638
1639        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1640                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1641                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1642        if (IS_ERR(xfer)) {
1643                ret = PTR_ERR(xfer);
1644                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1645                return ret;
1646        }
1647
1648        req.secondary_host = s_host;
1649        req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
1650        req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1651
1652        ret = ti_sci_do_xfer(info, xfer);
1653        if (ret) {
1654                dev_err(info->dev, "Mbox send fail %d\n", ret);
1655                goto fail;
1656        }
1657
1658        resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1659        if (!ti_sci_is_response_ack(resp)) {
1660                ret = -ENODEV;
1661        } else if (!resp->range_start && !resp->range_num) {
1662                ret = -ENODEV;
1663        } else {
1664                *range_start = resp->range_start;
1665                *range_num = resp->range_num;
1666        }
1667
1668fail:
1669        return ret;
1670}
1671
1672static int __maybe_unused
1673ti_sci_cmd_get_resource_range_static(const struct ti_sci_handle *handle,
1674                                     u32 dev_id, u8 subtype,
1675                                     u16 *range_start, u16 *range_num)
1676{
1677        struct ti_sci_resource_static_data *data;
1678        int i = 0;
1679
1680        while (1) {
1681                data = &rm_static_data[i];
1682
1683                if (!data->dev_id)
1684                        return -EINVAL;
1685
1686                if (data->dev_id != dev_id || data->subtype != subtype) {
1687                        i++;
1688                        continue;
1689                }
1690
1691                *range_start = data->range_start;
1692                *range_num = data->range_num;
1693
1694                return 0;
1695        }
1696
1697        return -EINVAL;
1698}
1699
1700/**
1701 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
1702 *                                 same host as the TI SCI interface host.
1703 * @handle:             Pointer to TISCI handle.
1704 * @dev_id:             TISCI device ID.
1705 * @subtype:            Resource assignment subtype that is being requested
1706 *                      from the given device.
1707 * @range_start:        Start index of the resource range
1708 * @range_num:          Number of resources in the range
1709 *
1710 * Return: 0 if all went fine, else return appropriate error.
1711 */
1712static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1713                                         u32 dev_id, u8 subtype,
1714                                         u16 *range_start, u16 *range_num)
1715{
1716        return ti_sci_get_resource_range(handle, dev_id, subtype,
1717                                         TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1718                                         range_start, range_num);
1719}
1720
1721/**
1722 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1723 *                                            assigned to a specified host.
1724 * @handle:             Pointer to TISCI handle.
1725 * @dev_id:             TISCI device ID.
1726 * @subtype:            Resource assignment subtype that is being requested
1727 *                      from the given device.
1728 * @s_host:             Host processor ID to which the resources are allocated
1729 * @range_start:        Start index of the resource range
1730 * @range_num:          Number of resources in the range
1731 *
1732 * Return: 0 if all went fine, else return appropriate error.
1733 */
1734static
1735int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1736                                             u32 dev_id, u8 subtype, u8 s_host,
1737                                             u16 *range_start, u16 *range_num)
1738{
1739        return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1740                                         range_start, range_num);
1741}
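
    /*
     * Example (illustrative sketch): a resource management client could look
     * up the range of resources (e.g. rings or interrupts) assigned to the
     * current host through rm_core_ops. The device ID and subtype values are
     * placeholders.
     *
     *	u16 start, num;
     *	int ret;
     *
     *	ret = handle->ops.rm_core_ops.get_range(handle, 187, 0, &start, &num);
     *	if (!ret)
     *		debug("resource range: %u..%u\n", start, start + num - 1);
     */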
1742
1743/**
1744 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1745 * @handle:             pointer to TI SCI handle
1746 * @msmc_start:         MSMC start address as returned by TISCI
1747 * @msmc_end:           MSMC end address as returned by TISCI
1748 *
1749 * Return: 0 if all went well, else returns appropriate error value.
1750 */
1751static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1752                                 u64 *msmc_start, u64 *msmc_end)
1753{
1754        struct ti_sci_msg_resp_query_msmc *resp;
1755        struct ti_sci_msg_hdr req;
1756        struct ti_sci_info *info;
1757        struct ti_sci_xfer *xfer;
1758        int ret = 0;
1759
1760        if (IS_ERR(handle))
1761                return PTR_ERR(handle);
1762        if (!handle)
1763                return -EINVAL;
1764
1765        info = handle_to_ti_sci_info(handle);
1766
1767        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1768                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1769                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1770        if (IS_ERR(xfer)) {
1771                ret = PTR_ERR(xfer);
1772                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1773                return ret;
1774        }
1775
1776        ret = ti_sci_do_xfer(info, xfer);
1777        if (ret) {
1778                dev_err(info->dev, "Mbox send fail %d\n", ret);
1779                return ret;
1780        }
1781
1782        resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1783
1784        if (!ti_sci_is_response_ack(resp))
1785                return -ENODEV;
1786
1787        *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1788                        resp->msmc_start_low;
1789        *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1790                        resp->msmc_end_low;
1791
1792        return ret;
1793}
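
    /*
     * Example (illustrative sketch): querying the MSMC region currently made
     * available by the firmware via core_ops.
     *
     *	u64 msmc_start, msmc_end;
     *	int ret;
     *
     *	ret = handle->ops.core_ops.query_msmc(handle, &msmc_start, &msmc_end);
     *	if (!ret)
     *		debug("MSMC: 0x%llx - 0x%llx\n", msmc_start, msmc_end);
     */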
1794
1795/**
1796 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1797 * @handle:     Pointer to TI SCI handle
1798 * @proc_id:    Processor ID this request is for
1799 *
1800 * Return: 0 if all went well, else returns appropriate error value.
1801 */
1802static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1803                                   u8 proc_id)
1804{
1805        struct ti_sci_msg_req_proc_request req;
1806        struct ti_sci_msg_hdr *resp;
1807        struct ti_sci_info *info;
1808        struct ti_sci_xfer *xfer;
1809        int ret = 0;
1810
1811        if (IS_ERR(handle))
1812                return PTR_ERR(handle);
1813        if (!handle)
1814                return -EINVAL;
1815
1816        info = handle_to_ti_sci_info(handle);
1817
1818        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1819                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1820                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1821        if (IS_ERR(xfer)) {
1822                ret = PTR_ERR(xfer);
1823                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1824                return ret;
1825        }
1826        req.processor_id = proc_id;
1827
1828        ret = ti_sci_do_xfer(info, xfer);
1829        if (ret) {
1830                dev_err(info->dev, "Mbox send fail %d\n", ret);
1831                return ret;
1832        }
1833
1834        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1835
1836        if (!ti_sci_is_response_ack(resp))
1837                ret = -ENODEV;
1838
1839        return ret;
1840}
1841
1842/**
1843 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1844 * @handle:     Pointer to TI SCI handle
1845 * @proc_id:    Processor ID this request is for
1846 *
1847 * Return: 0 if all went well, else returns appropriate error value.
1848 */
1849static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1850                                   u8 proc_id)
1851{
1852        struct ti_sci_msg_req_proc_release req;
1853        struct ti_sci_msg_hdr *resp;
1854        struct ti_sci_info *info;
1855        struct ti_sci_xfer *xfer;
1856        int ret = 0;
1857
1858        if (IS_ERR(handle))
1859                return PTR_ERR(handle);
1860        if (!handle)
1861                return -EINVAL;
1862
1863        info = handle_to_ti_sci_info(handle);
1864
1865        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1866                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1867                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1868        if (IS_ERR(xfer)) {
1869                ret = PTR_ERR(xfer);
1870                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1871                return ret;
1872        }
1873        req.processor_id = proc_id;
1874
1875        ret = ti_sci_do_xfer(info, xfer);
1876        if (ret) {
1877                dev_err(info->dev, "Mbox send fail %d\n", ret);
1878                return ret;
1879        }
1880
1881        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1882
1883        if (!ti_sci_is_response_ack(resp))
1884                ret = -ENODEV;
1885
1886        return ret;
1887}
1888
1889/**
1890 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1891 *                              control to a host in the processor's access
1892 *                              control list.
1893 * @handle:     Pointer to TI SCI handle
1894 * @proc_id:    Processor ID this request is for
1895 * @host_id:    Host ID to get the control of the processor
1896 *
1897 * Return: 0 if all went well, else returns appropriate error value.
1898 */
1899static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1900                                    u8 proc_id, u8 host_id)
1901{
1902        struct ti_sci_msg_req_proc_handover req;
1903        struct ti_sci_msg_hdr *resp;
1904        struct ti_sci_info *info;
1905        struct ti_sci_xfer *xfer;
1906        int ret = 0;
1907
1908        if (IS_ERR(handle))
1909                return PTR_ERR(handle);
1910        if (!handle)
1911                return -EINVAL;
1912
1913        info = handle_to_ti_sci_info(handle);
1914
1915        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1916                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1917                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1918        if (IS_ERR(xfer)) {
1919                ret = PTR_ERR(xfer);
1920                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1921                return ret;
1922        }
1923        req.processor_id = proc_id;
1924        req.host_id = host_id;
1925
1926        ret = ti_sci_do_xfer(info, xfer);
1927        if (ret) {
1928                dev_err(info->dev, "Mbox send fail %d\n", ret);
1929                return ret;
1930        }
1931
1932        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1933
1934        if (!ti_sci_is_response_ack(resp))
1935                ret = -ENODEV;
1936
1937        return ret;
1938}
1939
1940/**
1941 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1942 *                                  configuration flags
1943 * @handle:             Pointer to TI SCI handle
1944 * @proc_id:            Processor ID this request is for
     * @bootvector:         Boot vector (boot start address) for the processor
1945 * @config_flags_set:   Configuration flags to be set
1946 * @config_flags_clear: Configuration flags to be cleared.
1947 *
1948 * Return: 0 if all went well, else returns appropriate error value.
1949 */
1950static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1951                                        u8 proc_id, u64 bootvector,
1952                                        u32 config_flags_set,
1953                                        u32 config_flags_clear)
1954{
1955        struct ti_sci_msg_req_set_proc_boot_config req;
1956        struct ti_sci_msg_hdr *resp;
1957        struct ti_sci_info *info;
1958        struct ti_sci_xfer *xfer;
1959        int ret = 0;
1960
1961        if (IS_ERR(handle))
1962                return PTR_ERR(handle);
1963        if (!handle)
1964                return -EINVAL;
1965
1966        info = handle_to_ti_sci_info(handle);
1967
1968        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1969                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1970                                     (u32 *)&req, sizeof(req), sizeof(*resp));
1971        if (IS_ERR(xfer)) {
1972                ret = PTR_ERR(xfer);
1973                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1974                return ret;
1975        }
1976        req.processor_id = proc_id;
1977        req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1978        req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1979                                TISCI_ADDR_HIGH_SHIFT;
1980        req.config_flags_set = config_flags_set;
1981        req.config_flags_clear = config_flags_clear;
1982
1983        ret = ti_sci_do_xfer(info, xfer);
1984        if (ret) {
1985                dev_err(info->dev, "Mbox send fail %d\n", ret);
1986                return ret;
1987        }
1988
1989        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1990
1991        if (!ti_sci_is_response_ack(resp))
1992                ret = -ENODEV;
1993
1994        return ret;
1995}
1996
1997/**
1998 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1999 *                                   control flags
2000 * @handle:                     Pointer to TI SCI handle
2001 * @proc_id:                    Processor ID this request is for
2002 * @control_flags_set:          Control flags to be set
2003 * @control_flags_clear:        Control flags to be cleared
2004 *
2005 * Return: 0 if all went well, else returns appropriate error value.
2006 */
2007static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
2008                                         u8 proc_id, u32 control_flags_set,
2009                                         u32 control_flags_clear)
2010{
2011        struct ti_sci_msg_req_set_proc_boot_ctrl req;
2012        struct ti_sci_msg_hdr *resp;
2013        struct ti_sci_info *info;
2014        struct ti_sci_xfer *xfer;
2015        int ret = 0;
2016
2017        if (IS_ERR(handle))
2018                return PTR_ERR(handle);
2019        if (!handle)
2020                return -EINVAL;
2021
2022        info = handle_to_ti_sci_info(handle);
2023
2024        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
2025                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2026                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2027        if (IS_ERR(xfer)) {
2028                ret = PTR_ERR(xfer);
2029                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2030                return ret;
2031        }
2032        req.processor_id = proc_id;
2033        req.control_flags_set = control_flags_set;
2034        req.control_flags_clear = control_flags_clear;
2035
2036        ret = ti_sci_do_xfer(info, xfer);
2037        if (ret) {
2038                dev_err(info->dev, "Mbox send fail %d\n", ret);
2039                return ret;
2040        }
2041
2042        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2043
2044        if (!ti_sci_is_response_ack(resp))
2045                ret = -ENODEV;
2046
2047        return ret;
2048}
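
    /*
     * Example (illustrative sketch): a remote core loader could chain the
     * proc_ops above to take ownership of a core, program its boot vector and
     * flags, and hand the core back. The processor ID, boot vector and flag
     * values are placeholders; the boot config/ctrl flag macros come from the
     * TISCI protocol headers.
     *
     *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
     *	int ret;
     *
     *	ret = pops->proc_request(handle, 1);
     *	if (!ret)
     *		ret = pops->set_proc_boot_cfg(handle, 1, 0x82000000ULL,
     *					      cfg_set, cfg_clear);
     *	if (!ret)
     *		ret = pops->set_proc_boot_ctrl(handle, 1, ctrl_set, ctrl_clear);
     *	if (!ret)
     *		ret = pops->proc_release(handle, 1);
     */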
2049
2050/**
2051 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
2052 *                      image and then set the processor configuration flags.
2053 * @handle:     Pointer to TI SCI handle
2054 * @image_addr: Memory address at which the payload image and certificate are
2055 *              located; this is updated if the image data is moved during
2056 *              authentication.
2057 * @image_size: This is updated with the final size of the image after
2058 *              authentication.
2059 *
2060 * Return: 0 if all went well, else returns appropriate error value.
2061 */
2062static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
2063                                           u64 *image_addr, u32 *image_size)
2064{
2065        struct ti_sci_msg_req_proc_auth_boot_image req;
2066        struct ti_sci_msg_resp_proc_auth_boot_image *resp;
2067        struct ti_sci_info *info;
2068        struct ti_sci_xfer *xfer;
2069        int ret = 0;
2070
2071        if (IS_ERR(handle))
2072                return PTR_ERR(handle);
2073        if (!handle)
2074                return -EINVAL;
2075
2076        info = handle_to_ti_sci_info(handle);
2077
2078        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
2079                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2080                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2081        if (IS_ERR(xfer)) {
2082                ret = PTR_ERR(xfer);
2083                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2084                return ret;
2085        }
2086        req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
2087        req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
2088                                TISCI_ADDR_HIGH_SHIFT;
2089
2090        ret = ti_sci_do_xfer(info, xfer);
2091        if (ret) {
2092                dev_err(info->dev, "Mbox send fail %d\n", ret);
2093                return ret;
2094        }
2095
2096        resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
2097
2098        if (!ti_sci_is_response_ack(resp))
2099                return -ENODEV;
2100
2101        *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
2102                        (((u64)resp->image_addr_high <<
2103                          TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2104        *image_size = resp->image_size;
2105
2106        return ret;
2107}
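
    /*
     * Example (illustrative sketch): authenticating a signed image that was
     * loaded to memory. Both the address and the size are output parameters
     * and may be updated by the firmware if the image is relocated.
     * "loaded_image" is a placeholder for the buffer holding the certificate
     * plus payload.
     *
     *	u64 image_addr = (u64)(uintptr_t)loaded_image;
     *	u32 image_size = 0;
     *	int ret;
     *
     *	ret = handle->ops.proc_ops.proc_auth_boot_image(handle, &image_addr,
     *							&image_size);
     */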
2108
2109/**
2110 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
2111 * @handle:     Pointer to TI SCI handle
2112 * @proc_id:    Processor ID this request is for
     * @bv:         Boot vector the processor is currently configured with
     * @cfg_flags:  Currently applied configuration flags
     * @ctrl_flags: Currently applied control flags
     * @sts_flags:  Current status flags of the processor
2113 *
2114 * Return: 0 if all went well, else returns appropriate error value.
2115 */
2116static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
2117                                           u8 proc_id, u64 *bv, u32 *cfg_flags,
2118                                           u32 *ctrl_flags, u32 *sts_flags)
2119{
2120        struct ti_sci_msg_resp_get_proc_boot_status *resp;
2121        struct ti_sci_msg_req_get_proc_boot_status req;
2122        struct ti_sci_info *info;
2123        struct ti_sci_xfer *xfer;
2124        int ret = 0;
2125
2126        if (IS_ERR(handle))
2127                return PTR_ERR(handle);
2128        if (!handle)
2129                return -EINVAL;
2130
2131        info = handle_to_ti_sci_info(handle);
2132
2133        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
2134                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2135                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2136        if (IS_ERR(xfer)) {
2137                ret = PTR_ERR(xfer);
2138                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2139                return ret;
2140        }
2141        req.processor_id = proc_id;
2142
2143        ret = ti_sci_do_xfer(info, xfer);
2144        if (ret) {
2145                dev_err(info->dev, "Mbox send fail %d\n", ret);
2146                return ret;
2147        }
2148
2149        resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2150                                                        xfer->tx_message.buf;
2151
2152        if (!ti_sci_is_response_ack(resp))
2153                return -ENODEV;
2154        *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2155                        (((u64)resp->bootvector_high  <<
2156                          TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2157        *cfg_flags = resp->config_flags;
2158        *ctrl_flags = resp->control_flags;
2159        *sts_flags = resp->status_flags;
2160
2161        return ret;
2162}
2163
2164/**
2165 * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a
2166 *                              processor boot status without requesting or
2167 *                              waiting for a response.
     * @handle:                     Pointer to TI SCI handle
2168 * @proc_id:                    Processor ID this request is for
2169 * @num_wait_iterations:        Total number of iterations we will check before
2170 *                              we will timeout and give up
2171 * @num_match_iterations:       How many consecutive iterations the status
2172 *                              must match, to account for status bits
2173 *                              glitching. This implies that, to prevent a
2174 *                              timeout, the status must become stable with
2175 *                              at least num_match_iterations of the
2176 *                              num_wait_iterations checks still remaining.
2178 * @delay_per_iteration_us:     Specifies how long to wait (in microseconds)
2179 *                              between each status check. This is the minimum
2180 *                              duration, and overhead of register reads and
2181 *                              checks are on top of this and can vary based on
2182 *                              varied conditions.
2183 * @delay_before_iterations_us: Specifies how long to wait (in microseconds)
2184 *                              before the very first check in the first
2185 *                              iteration of the status check loop. This is
2186 *                              the minimum duration; overhead of register
2187 *                              reads and checks comes on top of this.
2188 * @status_flags_1_set_all_wait:If non-zero, specifies that all bits of the
2189 *                              status matching this field requested MUST be 1.
2190 * @status_flags_1_set_any_wait:If non-zero, specifies that at least one of the
2191 *                              bits matching this field requested MUST be 1.
2192 * @status_flags_1_clr_all_wait:If non-zero, specifies that all bits of the
2193 *                              status matching this field requested MUST be 0.
2194 * @status_flags_1_clr_any_wait:If non-zero, specifies that at least one of the
2195 *                              bits matching this field requested MUST be 0.
2196 *
2197 * Return: 0 if all goes well, else appropriate error message
2198 */
2199static int
2200ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
2201                                     u8 proc_id,
2202                                     u8 num_wait_iterations,
2203                                     u8 num_match_iterations,
2204                                     u8 delay_per_iteration_us,
2205                                     u8 delay_before_iterations_us,
2206                                     u32 status_flags_1_set_all_wait,
2207                                     u32 status_flags_1_set_any_wait,
2208                                     u32 status_flags_1_clr_all_wait,
2209                                     u32 status_flags_1_clr_any_wait)
2210{
2211        struct ti_sci_msg_req_wait_proc_boot_status req;
2212        struct ti_sci_info *info;
2213        struct ti_sci_xfer *xfer;
2214        int ret = 0;
2215
2216        if (IS_ERR(handle))
2217                return PTR_ERR(handle);
2218        if (!handle)
2219                return -EINVAL;
2220
2221        info = handle_to_ti_sci_info(handle);
2222
2223        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
2224                                     TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
2225                                     (u32 *)&req, sizeof(req), 0);
2226        if (IS_ERR(xfer)) {
2227                ret = PTR_ERR(xfer);
2228                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2229                return ret;
2230        }
2231        req.processor_id = proc_id;
2232        req.num_wait_iterations = num_wait_iterations;
2233        req.num_match_iterations = num_match_iterations;
2234        req.delay_per_iteration_us = delay_per_iteration_us;
2235        req.delay_before_iterations_us = delay_before_iterations_us;
2236        req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
2237        req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
2238        req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
2239        req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
2240
2241        ret = ti_sci_do_xfer(info, xfer);
2242        if (ret)
2243                dev_err(info->dev, "Mbox send fail %d\n", ret);
2244
2245        return ret;
2246}
2247
2248/**
2249 * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without
2250 *              requesting or waiting for a response. Note that this API call
2251 *              should be followed by placing the respective processor into
2252 *              either WFE or WFI mode.
2253 * @handle:     Pointer to TI SCI handle
2254 * @proc_id:    Processor ID this request is for
2255 *
2256 * Return: 0 if all went well, else returns appropriate error value.
2257 */
2258static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
2259                                            u8 proc_id)
2260{
2261        int ret;
2262        struct ti_sci_info *info;
2263
2264        if (IS_ERR(handle))
2265                return PTR_ERR(handle);
2266        if (!handle)
2267                return -EINVAL;
2268
2269        info = handle_to_ti_sci_info(handle);
2270
2271        /*
2272         * Send the core boot status wait message waiting for either WFE or
2273         * WFI without requesting or waiting for a TISCI response with the
2274         * maximum wait time to give us the best chance to get to the WFE/WFI
2275         * command that should follow the invocation of this API before the
2276         * DMSC-internal processing of this command times out. Note that
2277         * waiting for the R5 WFE/WFI flags will also work on an ARMV8 type
2278         * core as the related flag bit positions are the same.
2279         */
2280        ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
2281                U8_MAX, 100, U8_MAX, U8_MAX,
2282                0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
2283                0, 0);
2284        if (ret) {
2285                dev_err(info->dev, "Sending core %u wait message fail %d\n",
2286                        proc_id, ret);
2287                return ret;
2288        }
2289
2290        /*
2291         * Release a processor managed by TISCI without requesting or waiting
2292         * for a response.
2293         */
2294        ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
2295                                              MSG_DEVICE_SW_STATE_AUTO_OFF);
2296        if (ret)
2297                dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
2298                        proc_id, ret);
2299
2300        return ret;
2301}
2302
2303/**
2304 * ti_sci_cmd_ring_config() - configure RA ring
2305 * @handle:     pointer to TI SCI handle
2306 * @valid_params: Bitfield defining validity of ring configuration parameters.
2307 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2308 * @index: Ring index.
2309 * @addr_lo: The ring base address lo 32 bits
2310 * @addr_hi: The ring base address hi 32 bits
2311 * @count: Number of ring elements.
2312 * @mode: The mode of the ring
2313 * @size: The ring element size.
2314 * @order_id: Specifies the ring's bus order ID.
2315 *
2316 * Return: 0 if all went well, else returns appropriate error value.
2317 *
2318 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2319 */
2320static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2321                                  u32 valid_params, u16 nav_id, u16 index,
2322                                  u32 addr_lo, u32 addr_hi, u32 count,
2323                                  u8 mode, u8 size, u8 order_id)
2324{
2325        struct ti_sci_msg_rm_ring_cfg_resp *resp;
2326        struct ti_sci_msg_rm_ring_cfg_req req;
2327        struct ti_sci_xfer *xfer;
2328        struct ti_sci_info *info;
2329        int ret = 0;
2330
2331        if (IS_ERR(handle))
2332                return PTR_ERR(handle);
2333        if (!handle)
2334                return -EINVAL;
2335
2336        info = handle_to_ti_sci_info(handle);
2337
2338        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2339                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2340                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2341        if (IS_ERR(xfer)) {
2342                ret = PTR_ERR(xfer);
2343                dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
2344                return ret;
2345        }
2346        req.valid_params = valid_params;
2347        req.nav_id = nav_id;
2348        req.index = index;
2349        req.addr_lo = addr_lo;
2350        req.addr_hi = addr_hi;
2351        req.count = count;
2352        req.mode = mode;
2353        req.size = size;
2354        req.order_id = order_id;
2355
2356        ret = ti_sci_do_xfer(info, xfer);
2357        if (ret) {
2358                dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
2359                goto fail;
2360        }
2361
2362        resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
2363
2364        ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2365
2366fail:
2367        dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2368        return ret;
2369}
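
    /*
     * Example (illustrative sketch): configuring one ring accelerator ring.
     * The valid_params bitfield selects which of the remaining arguments the
     * firmware applies; its bit definitions and the mode/element-size
     * encodings come from the TISCI protocol headers. All values shown are
     * placeholders.
     *
     *	ret = handle->ops.rm_ring_ops.config(handle, valid_params, nav_id,
     *					     ring_idx,
     *					     lower_32_bits(ring_dma),
     *					     upper_32_bits(ring_dma),
     *					     num_elements, mode, el_size, 0);
     */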
2370
2371static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2372                                   u32 nav_id, u32 src_thread, u32 dst_thread)
2373{
2374        struct ti_sci_msg_hdr *resp;
2375        struct ti_sci_msg_psil_pair req;
2376        struct ti_sci_xfer *xfer;
2377        struct ti_sci_info *info;
2378        int ret = 0;
2379
2380        if (IS_ERR(handle))
2381                return PTR_ERR(handle);
2382        if (!handle)
2383                return -EINVAL;
2384
2385        info = handle_to_ti_sci_info(handle);
2386
2387        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2388                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2389                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2390        if (IS_ERR(xfer)) {
2391                ret = PTR_ERR(xfer);
2392                dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2393                return ret;
2394        }
2395        req.nav_id = nav_id;
2396        req.src_thread = src_thread;
2397        req.dst_thread = dst_thread;
2398
2399        ret = ti_sci_do_xfer(info, xfer);
2400        if (ret) {
2401                dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2402                goto fail;
2403        }
2404
2405        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2406        ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2407
2408fail:
2409        dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%d\n",
2410                nav_id, src_thread, dst_thread, ret);
2411        return ret;
2412}
2413
2414static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2415                                     u32 nav_id, u32 src_thread, u32 dst_thread)
2416{
2417        struct ti_sci_msg_hdr *resp;
2418        struct ti_sci_msg_psil_unpair req;
2419        struct ti_sci_xfer *xfer;
2420        struct ti_sci_info *info;
2421        int ret = 0;
2422
2423        if (IS_ERR(handle))
2424                return PTR_ERR(handle);
2425        if (!handle)
2426                return -EINVAL;
2427
2428        info = handle_to_ti_sci_info(handle);
2429
2430        xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2431                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2432                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2433        if (IS_ERR(xfer)) {
2434                ret = PTR_ERR(xfer);
2435                dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2436                return ret;
2437        }
2438        req.nav_id = nav_id;
2439        req.src_thread = src_thread;
2440        req.dst_thread = dst_thread;
2441
2442        ret = ti_sci_do_xfer(info, xfer);
2443        if (ret) {
2444                dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2445                goto fail;
2446        }
2447
2448        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2449        ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2450
2451fail:
2452        dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%d\n",
2453                src_thread, dst_thread, ret);
2454        return ret;
2455}
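
    /*
     * Example (illustrative sketch): pairing a PSI-L source thread to a
     * destination thread before starting DMA and unpairing it once the
     * channel is torn down. The Navigator Subsystem device ID and thread IDs
     * are placeholders.
     *
     *	ret = handle->ops.rm_psil_ops.pair(handle, nav_id, src_thr, dst_thr);
     *	...
     *	ret = handle->ops.rm_psil_ops.unpair(handle, nav_id, src_thr, dst_thr);
     */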
2456
2457static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2458                        const struct ti_sci_handle *handle,
2459                        const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2460{
2461        struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2462        struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2463        struct ti_sci_xfer *xfer;
2464        struct ti_sci_info *info;
2465        int ret = 0;
2466
2467        if (IS_ERR(handle))
2468                return PTR_ERR(handle);
2469        if (!handle)
2470                return -EINVAL;
2471
2472        info = handle_to_ti_sci_info(handle);
2473
2474        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2475                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2476                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2477        if (IS_ERR(xfer)) {
2478                ret = PTR_ERR(xfer);
2479                dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2480                return ret;
2481        }
2482        req.valid_params = params->valid_params;
2483        req.nav_id = params->nav_id;
2484        req.index = params->index;
2485        req.tx_pause_on_err = params->tx_pause_on_err;
2486        req.tx_filt_einfo = params->tx_filt_einfo;
2487        req.tx_filt_pswords = params->tx_filt_pswords;
2488        req.tx_atype = params->tx_atype;
2489        req.tx_chan_type = params->tx_chan_type;
2490        req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2491        req.tx_fetch_size = params->tx_fetch_size;
2492        req.tx_credit_count = params->tx_credit_count;
2493        req.txcq_qnum = params->txcq_qnum;
2494        req.tx_priority = params->tx_priority;
2495        req.tx_qos = params->tx_qos;
2496        req.tx_orderid = params->tx_orderid;
2497        req.fdepth = params->fdepth;
2498        req.tx_sched_priority = params->tx_sched_priority;
2499        req.tx_burst_size = params->tx_burst_size;
2500        req.tx_tdtype = params->tx_tdtype;
2501        req.extended_ch_type = params->extended_ch_type;
2502
2503        ret = ti_sci_do_xfer(info, xfer);
2504        if (ret) {
2505                dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2506                goto fail;
2507        }
2508
2509        resp =
2510              (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2511        ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2512
2513fail:
2514        dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2515        return ret;
2516}
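
    /*
     * Example (illustrative sketch): configuring a UDMAP TX channel. Only the
     * fields flagged in valid_params are consumed by the firmware, so zeroing
     * the structure first keeps the remaining fields in a defined state. The
     * field values and valid_params bits (defined in the TISCI protocol
     * headers) are placeholders.
     *
     *	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg;
     *	int ret;
     *
     *	memset(&cfg, 0, sizeof(cfg));
     *	cfg.valid_params = valid_params;
     *	cfg.nav_id = nav_id;
     *	cfg.index = tx_ch_idx;
     *	cfg.tx_chan_type = chan_type;
     *	cfg.tx_fetch_size = fetch_words;
     *	cfg.txcq_qnum = tc_ring;
     *	ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);
     */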
2517
2518static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2519                        const struct ti_sci_handle *handle,
2520                        const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2521{
2522        struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2523        struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2524        struct ti_sci_xfer *xfer;
2525        struct ti_sci_info *info;
2526        int ret = 0;
2527
2528        if (IS_ERR(handle))
2529                return PTR_ERR(handle);
2530        if (!handle)
2531                return -EINVAL;
2532
2533        info = handle_to_ti_sci_info(handle);
2534
2535        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2536                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2537                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2538        if (IS_ERR(xfer)) {
2539                ret = PTR_ERR(xfer);
2540                dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2541                return ret;
2542        }
2543
2544        req.valid_params = params->valid_params;
2545        req.nav_id = params->nav_id;
2546        req.index = params->index;
2547        req.rx_fetch_size = params->rx_fetch_size;
2548        req.rxcq_qnum = params->rxcq_qnum;
2549        req.rx_priority = params->rx_priority;
2550        req.rx_qos = params->rx_qos;
2551        req.rx_orderid = params->rx_orderid;
2552        req.rx_sched_priority = params->rx_sched_priority;
2553        req.flowid_start = params->flowid_start;
2554        req.flowid_cnt = params->flowid_cnt;
2555        req.rx_pause_on_err = params->rx_pause_on_err;
2556        req.rx_atype = params->rx_atype;
2557        req.rx_chan_type = params->rx_chan_type;
2558        req.rx_ignore_short = params->rx_ignore_short;
2559        req.rx_ignore_long = params->rx_ignore_long;
2560
2561        ret = ti_sci_do_xfer(info, xfer);
2562        if (ret) {
2563                dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2564                goto fail;
2565        }
2566
2567        resp =
2568              (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
2569        ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2570
2571fail:
2572        dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2573        return ret;
2574}
2575
2576static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2577                        const struct ti_sci_handle *handle,
2578                        const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2579{
2580        struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2581        struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2582        struct ti_sci_xfer *xfer;
2583        struct ti_sci_info *info;
2584        int ret = 0;
2585
2586        if (IS_ERR(handle))
2587                return PTR_ERR(handle);
2588        if (!handle)
2589                return -EINVAL;
2590
2591        info = handle_to_ti_sci_info(handle);
2592
2593        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2594                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2595                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2596        if (IS_ERR(xfer)) {
2597                ret = PTR_ERR(xfer);
2598                dev_err(info->dev, "RX_FL_CFG: Message alloc failed(%d)\n",
2599                        ret);
2600                return ret;
2601        }
2602
2603        req.valid_params = params->valid_params;
2604        req.nav_id = params->nav_id;
2605        req.flow_index = params->flow_index;
2606        req.rx_einfo_present = params->rx_einfo_present;
2607        req.rx_psinfo_present = params->rx_psinfo_present;
2608        req.rx_error_handling = params->rx_error_handling;
2609        req.rx_desc_type = params->rx_desc_type;
2610        req.rx_sop_offset = params->rx_sop_offset;
2611        req.rx_dest_qnum = params->rx_dest_qnum;
2612        req.rx_src_tag_hi = params->rx_src_tag_hi;
2613        req.rx_src_tag_lo = params->rx_src_tag_lo;
2614        req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2615        req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2616        req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2617        req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2618        req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2619        req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2620        req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2621        req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2622        req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2623        req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2624        req.rx_ps_location = params->rx_ps_location;
2625
2626        ret = ti_sci_do_xfer(info, xfer);
2627        if (ret) {
2628                dev_err(info->dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2629                goto fail;
2630        }
2631
2632        resp =
2633               (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2634        ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2635
2636fail:
2637        dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2638        return ret;
2639}
2640
2641/**
2642 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
2643 * @handle:    pointer to TI SCI handle
2644 * @region:    region configuration parameters
2645 *
2646 * Return: 0 if all went well, else returns appropriate error value.
2647 */
2648static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2649                                     const struct ti_sci_msg_fwl_region *region)
2650{
2651        struct ti_sci_msg_fwl_set_firewall_region_req req;
2652        struct ti_sci_msg_hdr *resp;
2653        struct ti_sci_info *info;
2654        struct ti_sci_xfer *xfer;
2655        int ret = 0;
2656
2657        if (IS_ERR(handle))
2658                return PTR_ERR(handle);
2659        if (!handle)
2660                return -EINVAL;
2661
2662        info = handle_to_ti_sci_info(handle);
2663
2664        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2665                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2666                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2667        if (IS_ERR(xfer)) {
2668                ret = PTR_ERR(xfer);
2669                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2670                return ret;
2671        }
2672
2673        req.fwl_id = region->fwl_id;
2674        req.region = region->region;
2675        req.n_permission_regs = region->n_permission_regs;
2676        req.control = region->control;
2677        req.permissions[0] = region->permissions[0];
2678        req.permissions[1] = region->permissions[1];
2679        req.permissions[2] = region->permissions[2];
2680        req.start_address = region->start_address;
2681        req.end_address = region->end_address;
2682
2683        ret = ti_sci_do_xfer(info, xfer);
2684        if (ret) {
2685                dev_err(info->dev, "Mbox send fail %d\n", ret);
2686                return ret;
2687        }
2688
2689        resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2690
2691        if (!ti_sci_is_response_ack(resp))
2692                return -ENODEV;
2693
2694        return 0;
2695}
2696
2697/**
2698 * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
2699 * @handle:    pointer to TI SCI handle
2700 * @region:    region configuration parameters
2701 *
2702 * Return: 0 if all went well, else returns appropriate error value.
2703 */
2704static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2705                                     struct ti_sci_msg_fwl_region *region)
2706{
2707        struct ti_sci_msg_fwl_get_firewall_region_req req;
2708        struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2709        struct ti_sci_info *info;
2710        struct ti_sci_xfer *xfer;
2711        int ret = 0;
2712
2713        if (IS_ERR(handle))
2714                return PTR_ERR(handle);
2715        if (!handle)
2716                return -EINVAL;
2717
2718        info = handle_to_ti_sci_info(handle);
2719
2720        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2721                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2722                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2723        if (IS_ERR(xfer)) {
2724                ret = PTR_ERR(xfer);
2725                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2726                return ret;
2727        }
2728
2729        req.fwl_id = region->fwl_id;
2730        req.region = region->region;
2731        req.n_permission_regs = region->n_permission_regs;
2732
2733        ret = ti_sci_do_xfer(info, xfer);
2734        if (ret) {
2735                dev_err(info->dev, "Mbox send fail %d\n", ret);
2736                return ret;
2737        }
2738
2739        resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2740
2741        if (!ti_sci_is_response_ack(resp))
2742                return -ENODEV;
2743
2744        region->fwl_id = resp->fwl_id;
2745        region->region = resp->region;
2746        region->n_permission_regs = resp->n_permission_regs;
2747        region->control = resp->control;
2748        region->permissions[0] = resp->permissions[0];
2749        region->permissions[1] = resp->permissions[1];
2750        region->permissions[2] = resp->permissions[2];
2751        region->start_address = resp->start_address;
2752        region->end_address = resp->end_address;
2753
2754        return 0;
2755}
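
    /*
     * Example (illustrative sketch): read-modify-write of one firewall
     * region, fetching its current configuration and widening the
     * permissions before writing it back. The firewall ID, region index and
     * permission value are placeholders.
     *
     *	struct ti_sci_msg_fwl_region region = {
     *		.fwl_id = fwl_id,
     *		.region = 0,
     *		.n_permission_regs = 3,
     *	};
     *	int ret;
     *
     *	ret = handle->ops.fwl_ops.get_fwl_region(handle, &region);
     *	if (!ret) {
     *		region.permissions[0] = new_perms;
     *		ret = handle->ops.fwl_ops.set_fwl_region(handle, &region);
     *	}
     */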
2756
2757/**
2758 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
2759 * @handle:    pointer to TI SCI handle
2760 * @owner:     firewall region ownership parameters
2761 *
2762 * Return: 0 if all went well, else returns appropriate error value.
2763 */
2764static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2765                                       struct ti_sci_msg_fwl_owner *owner)
2766{
2767        struct ti_sci_msg_fwl_change_owner_info_req req;
2768        struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2769        struct ti_sci_info *info;
2770        struct ti_sci_xfer *xfer;
2771        int ret = 0;
2772
2773        if (IS_ERR(handle))
2774                return PTR_ERR(handle);
2775        if (!handle)
2776                return -EINVAL;
2777
2778        info = handle_to_ti_sci_info(handle);
2779
2780        xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
2781                                     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2782                                     (u32 *)&req, sizeof(req), sizeof(*resp));
2783        if (IS_ERR(xfer)) {
2784                ret = PTR_ERR(xfer);
2785                dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2786                return ret;
2787        }
2788
2789        req.fwl_id = owner->fwl_id;
2790        req.region = owner->region;
2791        req.owner_index = owner->owner_index;
2792
2793        ret = ti_sci_do_xfer(info, xfer);
2794        if (ret) {
2795                dev_err(info->dev, "Mbox send fail %d\n", ret);
2796                return ret;
2797        }
2798
2799        resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2800
2801        if (!ti_sci_is_response_ack(resp))
2802                return -ENODEV;
2803
2804        owner->fwl_id = resp->fwl_id;
2805        owner->region = resp->region;
2806        owner->owner_index = resp->owner_index;
2807        owner->owner_privid = resp->owner_privid;
2808        owner->owner_permission_bits = resp->owner_permission_bits;
2809
2810        return ret;
2811}
2812
2813/*
2814 * ti_sci_setup_ops() - Setup the operations structures
2815 * @info:       pointer to TISCI instance
2816 */
2817static void ti_sci_setup_ops(struct ti_sci_info *info)
2818{
2819        struct ti_sci_ops *ops = &info->handle.ops;
2820        struct ti_sci_board_ops *bops = &ops->board_ops;
2821        struct ti_sci_dev_ops *dops = &ops->dev_ops;
2822        struct ti_sci_clk_ops *cops = &ops->clk_ops;
2823        struct ti_sci_core_ops *core_ops = &ops->core_ops;
2824        struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2825        struct ti_sci_proc_ops *pops = &ops->proc_ops;
2826        struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2827        struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2828        struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2829        struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
2830
2831        bops->board_config = ti_sci_cmd_set_board_config;
2832        bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2833        bops->board_config_security = ti_sci_cmd_set_board_config_security;
2834        bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
2835
2836        dops->get_device = ti_sci_cmd_get_device;
2837        dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
2838        dops->idle_device = ti_sci_cmd_idle_device;
2839        dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
2840        dops->put_device = ti_sci_cmd_put_device;
2841        dops->is_valid = ti_sci_cmd_dev_is_valid;
2842        dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2843        dops->is_idle = ti_sci_cmd_dev_is_idle;
2844        dops->is_stop = ti_sci_cmd_dev_is_stop;
2845        dops->is_on = ti_sci_cmd_dev_is_on;
2846        dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2847        dops->set_device_resets = ti_sci_cmd_set_device_resets;
2848        dops->get_device_resets = ti_sci_cmd_get_device_resets;
2849        dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;
2850
2851        cops->get_clock = ti_sci_cmd_get_clock;
2852        cops->idle_clock = ti_sci_cmd_idle_clock;
2853        cops->put_clock = ti_sci_cmd_put_clock;
2854        cops->is_auto = ti_sci_cmd_clk_is_auto;
2855        cops->is_on = ti_sci_cmd_clk_is_on;
2856        cops->is_off = ti_sci_cmd_clk_is_off;
2857
2858        cops->set_parent = ti_sci_cmd_clk_set_parent;
2859        cops->get_parent = ti_sci_cmd_clk_get_parent;
2860        cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2861
2862        cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2863        cops->set_freq = ti_sci_cmd_clk_set_freq;
2864        cops->get_freq = ti_sci_cmd_clk_get_freq;
2865
2866        core_ops->reboot_device = ti_sci_cmd_core_reboot;
2867        core_ops->query_msmc = ti_sci_cmd_query_msmc;
2868
2869        rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2870        rm_core_ops->get_range_from_shost =
2871                ti_sci_cmd_get_resource_range_from_shost;
2872
2873        pops->proc_request = ti_sci_cmd_proc_request;
2874        pops->proc_release = ti_sci_cmd_proc_release;
2875        pops->proc_handover = ti_sci_cmd_proc_handover;
2876        pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2877        pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2878        pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2879        pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
2880        pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;
2881
2882        rops->config = ti_sci_cmd_ring_config;
2883
2884        psilops->pair = ti_sci_cmd_rm_psil_pair;
2885        psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2886
2887        udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2888        udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2889        udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2890
2891        fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
2892        fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
2893        fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
2894}
2895
2896/**
2897 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2898 * @sci_dev:    Pointer to the SYSFW device
2899 *
2900 * Return: pointer to handle if successful, else ERR_PTR(-EINVAL) if invalid
2901 *         conditions are encountered.
2902 */
2903const
2904struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2905{
2906        if (!sci_dev)
2907                return ERR_PTR(-EINVAL);
2908
2909        struct ti_sci_info *info = dev_get_priv(sci_dev);
2910
2911        if (!info)
2912                return ERR_PTR(-EINVAL);
2913
2914        struct ti_sci_handle *handle = &info->handle;
2915
2916        if (!handle)
2917                return ERR_PTR(-EINVAL);
2918
2919        return handle;
2920}
2921
2922/**
2923 * ti_sci_get_handle() - Get the TI SCI handle for a device
2924 * @dev:        Pointer to device for which we want SCI handle
2925 *
2926 * Return: pointer to handle if successful, else ERR_PTR(-EINVAL) if invalid
2927 *         conditions are encountered.
2928 */
2929const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2930{
2931        if (!dev)
2932                return ERR_PTR(-EINVAL);
2933
2934        struct udevice *sci_dev = dev_get_parent(dev);
2935
2936        return ti_sci_get_handle_from_sysfw(sci_dev);
2937}
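
/*
 * Usage sketch (illustrative only): a driver probed as a child of the TISCI
 * node can fetch the handle from its parent and invoke an op on it; the
 * device ID passed to get_device() below is a placeholder:
 *
 *	const struct ti_sci_handle *handle = ti_sci_get_handle(dev);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	ret = handle->ops.dev_ops.get_device(handle, 32);
 */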
2938
2939/**
2940 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2941 * @dev:        Device whose node contains the phandle to the TISCI node
2942 * @property:   Name of the property holding the phandle to the TISCI node
2943 *
2944 * Return: pointer to handle if successful, else an appropriate error pointer.
2945 */
2946const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2947                                                  const char *property)
2948{
2949        struct ti_sci_info *entry, *info = NULL;
2950        u32 phandle;
2951        ofnode node;
2952
2953        int err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2954        if (err)
2955                return ERR_PTR(err);
2956
2957        node = ofnode_get_by_phandle(phandle);
2958        if (!ofnode_valid(node))
2959                return ERR_PTR(-EINVAL);
2960
2961        list_for_each_entry(entry, &ti_sci_list, list)
2962                if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2963                        info = entry;
2964                        break;
2965                }
2966
2967        if (!info)
2968                return ERR_PTR(-ENODEV);
2969
2970        return &info->handle;
2971}
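
/*
 * Usage sketch (illustrative only): a client whose node references the TISCI
 * node through a phandle property (assumed here to be named "ti,sci", as in
 * several K3 bindings) can look the handle up like this:
 *
 *	const struct ti_sci_handle *handle;
 *
 *	handle = ti_sci_get_by_phandle(dev, "ti,sci");
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */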
2972
2973/**
2974 * ti_sci_of_to_info() - generate private data from device tree
2975 * @dev:        corresponding system controller interface device
2976 * @info:       pointer to driver specific private data
2977 *
2978 * Return: 0 if all goes well, else an appropriate error value.
2979 */
2980static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2981{
2982        int ret;
2983
2984        ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2985        if (ret) {
2986                dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2987                        __func__, ret);
2988                return ret;
2989        }
2990
2991        ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2992        if (ret) {
2993                dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2994                        __func__, ret);
2995                return ret;
2996        }
2997
2998        /* Notify channel is optional. Enable only if populated */
2999        ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
3000        if (ret) {
3001                dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
3002                        __func__, ret);
3003        }
3004
3005        info->host_id = dev_read_u32_default(dev, "ti,host-id",
3006                                             info->desc->default_host_id);
3007
3008        info->is_secure = dev_read_bool(dev, "ti,secure-host");
3009
3010        return 0;
3011}
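
/*
 * Device-tree sketch (illustrative only) of a node this parsing expects:
 * the "rx"/"tx" (and optional "notify") names match the mbox_get_by_name()
 * calls above. Phandles, thread numbers and the host ID are placeholders:
 *
 *	dmsc: system-controller {
 *		compatible = "ti,k2g-sci";
 *		mbox-names = "rx", "tx";
 *		mboxes = <&secure_proxy 11>, <&secure_proxy 13>;
 *		ti,host-id = <12>;
 *		ti,secure-host;
 *	};
 */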
3012
3013/**
3014 * ti_sci_probe() - Basic probe
3015 * @dev:        corresponding system controller interface device
3016 *
3017 * Return: 0 if all goes well, else an appropriate error value.
3018 */
3019static int ti_sci_probe(struct udevice *dev)
3020{
3021        struct ti_sci_info *info;
3022        int ret;
3023
3024        debug("%s(dev=%p)\n", __func__, dev);
3025
3026        info = dev_get_priv(dev);
3027        info->desc = (void *)dev_get_driver_data(dev);
3028
3029        ret = ti_sci_of_to_info(dev, info);
3030        if (ret) {
3031                dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
3032                return ret;
3033        }
3034
3035        info->dev = dev;
3036        info->seq = 0xA;
3037
3038        list_add_tail(&info->list, &ti_sci_list);
3039        ti_sci_setup_ops(info);
3040
3041        ret = ti_sci_cmd_get_revision(&info->handle);
3042
3043        INIT_LIST_HEAD(&info->dev_list);
3044
3045        return ret;
3046}
3047
3048/**
3049 * ti_sci_dm_probe() - Basic probe for DM to TIFS SCI
3050 * @dev:        corresponding system controller interface device
3051 *
3052 * Return: 0 if all goes well, else an appropriate error value.
3053 */
3054static __maybe_unused int ti_sci_dm_probe(struct udevice *dev)
3055{
3056        struct ti_sci_rm_core_ops *rm_core_ops;
3057        struct ti_sci_rm_udmap_ops *udmap_ops;
3058        struct ti_sci_rm_ringacc_ops *rops;
3059        struct ti_sci_rm_psil_ops *psilops;
3060        struct ti_sci_ops *ops;
3061        struct ti_sci_info *info;
3062        int ret;
3063
3064        debug("%s(dev=%p)\n", __func__, dev);
3065
3066        info = dev_get_priv(dev);
3067        info->desc = (void *)dev_get_driver_data(dev);
3068
3069        ret = ti_sci_of_to_info(dev, info);
3070        if (ret) {
3071                dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
3072                return ret;
3073        }
3074
3075        info->dev = dev;
3076        info->seq = 0xA;
3077
3078        list_add_tail(&info->list, &ti_sci_list);
3079
3080        ops = &info->handle.ops;
3081
3082        rm_core_ops = &ops->rm_core_ops;
3083        rm_core_ops->get_range = ti_sci_cmd_get_resource_range_static;
3084
3085        rops = &ops->rm_ring_ops;
3086        rops->config = ti_sci_cmd_ring_config;
3087
3088        psilops = &ops->rm_psil_ops;
3089        psilops->pair = ti_sci_cmd_rm_psil_pair;
3090        psilops->unpair = ti_sci_cmd_rm_psil_unpair;
3091
3092        udmap_ops = &ops->rm_udmap_ops;
3093        udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
3094        udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
3095        udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
3096
3097        return ret;
3098}
3099
3100/**
3101 * ti_sci_get_free_resource() - Get a free resource from a TISCI resource
3102 * @res:        Pointer to the TISCI resource
3103 *
3104 * Return: resource number if one is available, else TI_SCI_RESOURCE_NULL.
3105 */
3106u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3107{
3108        u16 set, free_bit;
3109
3110        for (set = 0; set < res->sets; set++) {
3111                free_bit = find_first_zero_bit(res->desc[set].res_map,
3112                                               res->desc[set].num);
3113                if (free_bit != res->desc[set].num) {
3114                        set_bit(free_bit, res->desc[set].res_map);
3115                        return res->desc[set].start + free_bit;
3116                }
3117        }
3118
3119        return TI_SCI_RESOURCE_NULL;
3120}
3121
3122/**
3123 * ti_sci_release_resource() - Release a resource from TISCI resource.
3124 * @res:        Pointer to the TISCI resource
3125 */
3126void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3127{
3128        u16 set;
3129
3130        for (set = 0; set < res->sets; set++) {
3131                if (res->desc[set].start <= id &&
3132                    (res->desc[set].num + res->desc[set].start) > id)
3133                        clear_bit(id - res->desc[set].start,
3134                                  res->desc[set].res_map);
3135        }
3136}
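
/*
 * Usage sketch (illustrative only): typical pairing of the two helpers above
 * once a ti_sci_resource has been set up (see devm_ti_sci_get_of_resource()
 * below):
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENOENT;
 *	// ... program the hardware with resource "id" ...
 *	ti_sci_release_resource(res, id);
 */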
3137
3138/**
3139 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3140 * @handle:     TISCI handle
3141 * @dev:        Device pointer to which the resource is assigned
3142 * @dev_id:     TISCI device ID to which the resources are assigned
3143 * @of_prop:    property name by which the resources are represented
3144 *
3145 * Note: This function expects of_prop to be a list of resource subtypes for
3146 *      @dev_id. It allocates and initializes a ti_sci_resource structure that
3147 *      clients can then manage with ti_sci_(get_free, release)_resource().
3148 *
3149 * Return: Pointer to ti_sci_resource if all went well else appropriate
3150 *         error pointer.
3151 */
3152struct ti_sci_resource *
3153devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3154                            struct udevice *dev, u32 dev_id, char *of_prop)
3155{
3156        u32 resource_subtype;
3157        struct ti_sci_resource *res;
3158        bool valid_set = false;
3159        int sets, i, ret;
3160        u32 *temp;
3161
3162        res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3163        if (!res)
3164                return ERR_PTR(-ENOMEM);
3165
3166        sets = dev_read_size(dev, of_prop);
3167        if (sets < 0) {
3168                dev_err(dev, "%s resource type ids not available\n", of_prop);
3169                return ERR_PTR(sets);
3170        }
3171        temp = malloc(sets);
3172        sets /= sizeof(u32);
3173        res->sets = sets;
3174
3175        res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3176                                 GFP_KERNEL);
3177        if (!res->desc)
3178                return ERR_PTR(-ENOMEM);
3179
3180        ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
3181        if (ret)
3182                return ERR_PTR(-EINVAL);
3183
3184        for (i = 0; i < res->sets; i++) {
3185                resource_subtype = temp[i];
3186                ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3187                                                        resource_subtype,
3188                                                        &res->desc[i].start,
3189                                                        &res->desc[i].num);
3190                if (ret) {
3191                        dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
3192                                dev_id, resource_subtype,
3193                                handle_to_ti_sci_info(handle)->host_id);
3194                        res->desc[i].start = 0;
3195                        res->desc[i].num = 0;
3196                        continue;
3197                }
3198
3199                valid_set = true;
3200                dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
3201                        dev_id, resource_subtype, res->desc[i].start,
3202                        res->desc[i].num);
3203
3204                res->desc[i].res_map =
3205                        devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3206                                     sizeof(*res->desc[i].res_map), GFP_KERNEL);
3207                if (!res->desc[i].res_map)
3208                        return ERR_PTR(-ENOMEM);
3209        }
3210
3211        if (valid_set)
3212                return res;
3213
3214        return ERR_PTR(-EINVAL);
3215}
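
/*
 * Usage sketch (illustrative only): a ring accelerator or DMA driver can pull
 * its resource ranges from a property such as "ti,sci-rm-range-gp-rings"; the
 * property name and the device ID (187) below are placeholders:
 *
 *	struct ti_sci_resource *res;
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, 187,
 *					  "ti,sci-rm-range-gp-rings");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */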
3216
3217/* Description for K2G */
3218static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3219        .default_host_id = 2,
3220        /* Conservative duration */
3221        .max_rx_timeout_ms = 10000,
3222        /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3223        .max_msgs = 20,
3224        .max_msg_size = 64,
3225};
3226
3227/* Description for AM654 */
3228static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3229        .default_host_id = 12,
3230        /* Conservative duration */
3231        .max_rx_timeout_ms = 10000,
3232        /* Limited by MBOX_TX_QUEUE_LEN. */
3233        .max_msgs = 20,
3234        .max_msg_size = 60,
3235};
3236
3237/* Description for J721e DM to DMSC communication */
3238static const struct ti_sci_desc ti_sci_dm_j721e_desc = {
3239        .default_host_id = 3,
3240        .max_rx_timeout_ms = 10000,
3241        .max_msgs = 20,
3242        .max_msg_size = 60,
3243};
3244
3245static const struct udevice_id ti_sci_ids[] = {
3246        {
3247                .compatible = "ti,k2g-sci",
3248                .data = (ulong)&ti_sci_pmmc_k2g_desc
3249        },
3250        {
3251                .compatible = "ti,am654-sci",
3252                .data = (ulong)&ti_sci_pmmc_am654_desc
3253        },
3254        { /* Sentinel */ },
3255};
3256
3257static __maybe_unused const struct udevice_id ti_sci_dm_ids[] = {
3258        {
3259                .compatible = "ti,j721e-dm-sci",
3260                .data = (ulong)&ti_sci_dm_j721e_desc
3261        },
3262        { /* Sentinel */ },
3263};
3264
3265U_BOOT_DRIVER(ti_sci) = {
3266        .name = "ti_sci",
3267        .id = UCLASS_FIRMWARE,
3268        .of_match = ti_sci_ids,
3269        .probe = ti_sci_probe,
3270        .priv_auto      = sizeof(struct ti_sci_info),
3271};
3272
3273#if IS_ENABLED(CONFIG_K3_DM_FW)
3274U_BOOT_DRIVER(ti_sci_dm) = {
3275        .name = "ti_sci_dm",
3276        .id = UCLASS_FIRMWARE,
3277        .of_match = ti_sci_dm_ids,
3278        .probe = ti_sci_dm_probe,
3279        .priv_auto = sizeof(struct ti_sci_info),
3280};
3281#endif
3282