linux/drivers/firmware/arm_scmi/virtio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2021 OpenSynergy.
 * Copyright (C) 2021 ARM Ltd.
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
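 *
 * Message lifecycle, as implemented below: Tx messages are drawn from the
 * per-channel free_list, queued to the cmdq as a request/response descriptor
 * pair and returned to the free_list once the response has been fetched;
 * Rx buffers are fed to the eventq up front and re-added after each
 * notification or delayed response has been processed.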
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "common.h"

#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
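/* A Tx message needs two descriptors: one out (request), one in (response). */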
#define DESCRIPTORS_PER_TX_MSG 2

/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @is_rx: Whether channel is an Rx channel
 * @ready: Whether transport user is ready to hear about channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except ready.
 * @ready_lock: Protects access to ready. If required, it must be taken before
 *              lock.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	struct list_head free_list;
	bool is_rx;
	bool ready;
	unsigned int max_msg;
	/* lock to protect access to all members except ready. */
	spinlock_t lock;
	/* lock to protect access to ready flag. */
	spinlock_t ready_lock;
};

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
};

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

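/*
 * Add one pre-allocated input buffer to the receive virtqueue and kick it,
 * making the buffer available for the device to use for a notification or a
 * delayed response.
 */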
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err_once(vioch->cinfo->dev,
			     "failed to add to virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

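/*
 * Recycle a message once it has been processed: an Rx buffer is fed back to
 * the eventq for the device to reuse, while a Tx buffer is returned to the
 * channel free_list.
 */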
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx) {
		scmi_vio_feed_vq_rx(vioch, msg);
	} else {
		/* Here IRQs are assumed to be already disabled by the caller */
		spin_lock(&vioch->lock);
		list_add(&msg->list, &vioch->free_list);
		spin_unlock(&vioch->lock);
	}
}

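/*
 * Virtqueue completion callback, shared by the cmdq and the eventq.
 *
 * Buffers are fetched in a loop with callbacks disabled, re-enabling them
 * only once the queue appears empty (and re-checking afterwards to close the
 * race with a concurrent device kick). The ready flag is sampled on each
 * iteration so that a channel being torn down stops processing outstanding
 * buffers promptly.
 */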
static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long ready_flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
	bool cb_enabled = true;

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	for (;;) {
		spin_lock_irqsave(&vioch->ready_lock, ready_flags);

		if (!vioch->ready) {
			if (!cb_enabled)
				(void)virtqueue_enable_cb(vqueue);
			goto unlock_ready_out;
		}

		/* IRQs are already disabled here, no need to irqsave */
		spin_lock(&vioch->lock);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
		}
		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			if (virtqueue_enable_cb(vqueue))
				goto unlock_out;
			cb_enabled = true;
		}
		spin_unlock(&vioch->lock);

		if (msg) {
			msg->rx_len = length;
			scmi_rx_callback(vioch->cinfo,
					 msg_read_header(msg->input), msg);

			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release ready_lock and re-enable IRQs between loop iterations
		 * to allow virtio_chan_free() to possibly kick in and set the
		 * flag vioch->ready to false even in between processing of
		 * messages, so as to force outstanding messages to be ignored
		 * when the system is shutting down.
		 */
		spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
	}

unlock_out:
	spin_unlock(&vioch->lock);
unlock_ready_out:
	spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
}

static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };

static vq_callback_t *scmi_vio_complete_callbacks[] = {
	scmi_vio_complete_cb,
	scmi_vio_complete_cb
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

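/*
 * Tie the SCMI platform device to the virtio device via a device link, so
 * that the supplier is guaranteed to have probed first; defer the probe
 * until a scmi-virtio device has actually been bound.
 */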
static int virtio_link_supplier(struct device *dev)
{
	if (!scmi_vdev) {
		dev_notice_once(dev,
				"Deferring probe after not finding a bound scmi-virtio device\n");
		return -EPROBE_DEFER;
	}

	if (!device_link_add(dev, &scmi_vdev->dev,
			     DL_FLAG_AUTOREMOVE_CONSUMER)) {
		dev_err(dev, "Adding link to supplier virtio device failed\n");
		return -ECANCELED;
	}

	return 0;
}

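/*
 * A channel is available when the corresponding virtqueue exists (the Rx one
 * only if the device advertised VIRTIO_SCMI_F_P2A_CHANNELS) and no SCMI
 * channel has been bound to it yet.
 */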
static bool virtio_chan_available(struct device *dev, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	if (WARN_ON_ONCE(!scmi_vdev))
		return false;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		if (scmi_vio_have_vq_rx(scmi_vdev))
			vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	return vioch && !vioch->cinfo;
}

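/*
 * Pre-allocate max_msg message descriptors for the channel: Tx messages get
 * both a request and an input buffer and start out on the free_list, while
 * Rx messages only get an input buffer, which is fed to the eventq right
 * away. The channel is marked ready last.
 */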
static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	int i;

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		if (tx) {
			msg->request = devm_kzalloc(cinfo->dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
		}

		msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;

		if (tx) {
			spin_lock_irqsave(&vioch->lock, flags);
			list_add_tail(&msg->list, &vioch->free_list);
			spin_unlock_irqrestore(&vioch->lock, flags);
		} else {
			scmi_vio_feed_vq_rx(vioch, msg);
		}
	}

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly mark the channel as not available any more */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	spin_lock_irqsave(&vioch->ready_lock, flags);
	vioch->ready = true;
	spin_unlock_irqrestore(&vioch->ready_lock, flags);

	return 0;
}

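/*
 * Mark the channel not ready first, so that scmi_vio_complete_cb() stops
 * processing buffers before the channel resources are released and the
 * channel is made available again.
 */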
static int virtio_chan_free(int id, void *p, void *data)
{
	unsigned long flags;
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	spin_lock_irqsave(&vioch->ready_lock, flags);
	vioch->ready = false;
	spin_unlock_irqrestore(&vioch->ready_lock, flags);

	scmi_free_channel(cinfo, data, id);

	spin_lock_irqsave(&vioch->lock, flags);
	vioch->cinfo = NULL;
	spin_unlock_irqrestore(&vioch->lock, flags);

	return 0;
}

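/*
 * Take a free Tx message, serialize the xfer into its request buffer and
 * queue the request/response pair to the cmdq; -EBUSY is returned when all
 * pre-allocated messages are in flight.
 */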
static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out;
	struct scatterlist sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->lock, flags);

	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return -EBUSY;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del(&msg->list);

	msg_tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
	sg_init_one(&sg_in, msg->input, msg_response_size(xfer));

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc) {
		list_add(&msg->list, &vioch->free_list);
		dev_err_once(vioch->cinfo->dev,
			     "%s() failed to add to virtqueue (%d)\n", __func__,
			     rc);
	} else {
		virtqueue_kick(vioch->vqueue);
	}

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

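/*
 * Copy the payload from the message input buffer into the xfer. The message
 * was attached to the xfer as priv data by the receive path, and clearing
 * xfer->priv marks it as consumed. The notification counterpart below does
 * the same for eventq buffers.
 */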
static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg) {
		msg_fetch_response(msg->input, msg->rx_len, xfer);
		xfer->priv = NULL;
	}
}

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
				      size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg) {
		msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
		xfer->priv = NULL;
	}
}

static const struct scmi_transport_ops scmi_virtio_ops = {
	.link_supplier = virtio_link_supplier,
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
};

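/*
 * Find the device virtqueues and size each channel's message pool from its
 * vring: a Tx message consumes DESCRIPTORS_PER_TX_MSG descriptors, and the
 * pending-message count is further capped by MSG_TOKEN_MAX. The global
 * scmi_vdev pointer is published last.
 */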
static int scmi_vio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct scmi_vio_channel *channels;
	bool have_vq_rx;
	int vq_cnt;
	int i;
	int ret;
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtIO device allowed */
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

	channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	if (have_vq_rx)
		channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

	ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
			      scmi_vio_vqueue_names, NULL);
	if (ret) {
		dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
		return ret;
	}

	for (i = 0; i < vq_cnt; i++) {
		unsigned int sz;

		spin_lock_init(&channels[i].lock);
		spin_lock_init(&channels[i].ready_lock);
		INIT_LIST_HEAD(&channels[i].free_list);
		channels[i].vqueue = vqs[i];

		sz = virtqueue_get_vring_size(channels[i].vqueue);
		/* Tx messages need multiple descriptors. */
		if (!channels[i].is_rx)
			sz /= DESCRIPTORS_PER_TX_MSG;

		if (sz > MSG_TOKEN_MAX) {
			dev_info_once(dev,
				      "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
				      channels[i].is_rx ? "rx" : "tx",
				      sz, MSG_TOKEN_MAX);
			sz = MSG_TOKEN_MAX;
		}
		channels[i].max_msg = sz;
	}

	vdev->priv = channels;
	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	return 0;
}

static void scmi_vio_remove(struct virtio_device *vdev)
{
	/*
	 * Once we get here, virtio_chan_free() will have already been called by
	 * the SCMI core for any existing channel and, as a consequence, all the
	 * virtio channels will have been already marked NOT ready, causing any
	 * outstanding message on any vqueue to be ignored by complete_cb: now
	 * we can just stop processing buffers and destroy the vqueues.
	 */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

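/*
 * The SCMI virtio device is expected to comply with virtio spec 1.x (the
 * modern interface); reject legacy devices that do not offer
 * VIRTIO_F_VERSION_1.
 */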
static int scmi_vio_validate(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}

	return 0;
}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

static int __init virtio_scmi_init(void)
{
	return register_virtio_driver(&virtio_scmi_driver);
}

static void virtio_scmi_exit(void)
{
	unregister_virtio_driver(&virtio_scmi_driver);
}

const struct scmi_desc scmi_virtio_desc = {
	.transport_init = virtio_scmi_init,
	.transport_exit = virtio_scmi_exit,
	.ops = &scmi_virtio_ops,
	.max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
};