linux/sound/xen/xen_snd_front.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/delay.h>
#include <linux/module.h>

#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/sndif.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_evtchnl.h"

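/*
 * Grab the request slot at req_prod_pvt on the shared ring and fill in the
 * operation code and a unique request id. The slot becomes visible to the
 * backend only once xen_snd_front_evtchnl_flush() is called. Callers
 * serialize ring access with evtchnl->ring_io_lock.
 */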
static struct xensnd_req *
be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation)
{
        struct xensnd_req *req;

        req = RING_GET_REQUEST(&evtchnl->u.req.ring,
                               evtchnl->u.req.ring.req_prod_pvt);
        req->operation = operation;
        req->id = evtchnl->evt_next_id++;
        evtchnl->evt_id = req->id;
        return req;
}

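/*
 * Kick the backend: make sure the channel is still connected, re-arm the
 * response completion and flush the ring so the backend sees the request.
 */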
static int be_stream_do_io(struct xen_snd_front_evtchnl *evtchnl)
{
        if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
                return -EIO;

        reinit_completion(&evtchnl->u.req.completion);
        xen_snd_front_evtchnl_flush(evtchnl);
        return 0;
}

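/*
 * Wait for the backend to answer the last request: return -ETIMEDOUT if no
 * response arrives within VSND_WAIT_BACK_MS, otherwise the status code
 * reported by the backend.
 */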
static int be_stream_wait_io(struct xen_snd_front_evtchnl *evtchnl)
{
        if (wait_for_completion_timeout(&evtchnl->u.req.completion,
                        msecs_to_jiffies(VSND_WAIT_BACK_MS)) <= 0)
                return -ETIMEDOUT;

        return evtchnl->u.req.resp_status;
}

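/*
 * Ask the backend which HW parameters (formats, rates, channels, buffer
 * and period sizes) it can support for this stream, given the ranges in
 * hw_param_req; the backend's reply is copied to hw_param_resp.
 */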
int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
                                        struct xensnd_query_hw_param *hw_param_req,
                                        struct xensnd_query_hw_param *hw_param_resp)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
        req->op.hw_param = *hw_param_req;
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        if (ret == 0)
                *hw_param_resp = evtchnl->u.req.resp.hw_param;

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

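/*
 * Open the stream on the backend: pass the PCM format, channel count,
 * rate, buffer and period sizes and the grant reference directory that
 * describes the shared data buffer.
 */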
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
                                 struct xen_front_pgdir_shbuf *shbuf,
                                 u8 format, unsigned int channels,
                                 unsigned int rate, u32 buffer_sz,
                                 u32 period_sz)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
        req->op.open.pcm_format = format;
        req->op.open.pcm_channels = channels;
        req->op.open.pcm_rate = rate;
        req->op.open.buffer_sz = buffer_sz;
        req->op.open.period_sz = period_sz;
        req->op.open.gref_directory =
                xen_front_pgdir_shbuf_get_dir_start(shbuf);
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

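/* Tell the backend to close the stream and release its resources. */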
int xen_snd_front_stream_close(struct xen_snd_front_evtchnl *evtchnl)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

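/*
 * Playback: tell the backend that count bytes of PCM data are available
 * at offset pos in the shared buffer.
 */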
int xen_snd_front_stream_write(struct xen_snd_front_evtchnl *evtchnl,
                               unsigned long pos, unsigned long count)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
        req->op.rw.length = count;
        req->op.rw.offset = pos;
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

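/*
 * Capture: ask the backend to put count bytes of captured PCM data at
 * offset pos in the shared buffer.
 */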
int xen_snd_front_stream_read(struct xen_snd_front_evtchnl *evtchnl,
                              unsigned long pos, unsigned long count)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
        req->op.rw.length = count;
        req->op.rw.offset = pos;
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

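/*
 * Change the stream state: type is one of the XENSND_OP_TRIGGER_* values
 * (start, pause, stop, resume).
 */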
int xen_snd_front_stream_trigger(struct xen_snd_front_evtchnl *evtchnl,
                                 int type)
{
        struct xensnd_req *req;
        int ret;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        mutex_lock(&evtchnl->ring_io_lock);
        req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
        req->op.trigger.type = type;
        mutex_unlock(&evtchnl->ring_io_lock);

        ret = be_stream_do_io(evtchnl);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}

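/* Tear down the virtual ALSA card and free all event channels. */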
static void xen_snd_drv_fini(struct xen_snd_front_info *front_info)
{
        xen_snd_front_alsa_fini(front_info);
        xen_snd_front_evtchnl_free_all(front_info);
}

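/*
 * The backend is in InitWait: read the card configuration from XenStore,
 * create per-stream event channels and publish them so the backend can
 * connect to the rings.
 */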
static int sndback_initwait(struct xen_snd_front_info *front_info)
{
        int num_streams;
        int ret;

        ret = xen_snd_front_cfg_card(front_info, &num_streams);
        if (ret < 0)
                return ret;

        /* Create event channels for all streams and publish them. */
        ret = xen_snd_front_evtchnl_create_all(front_info, num_streams);
        if (ret < 0)
                return ret;

        return xen_snd_front_evtchnl_publish_all(front_info);
}

static int sndback_connect(struct xen_snd_front_info *front_info)
{
        return xen_snd_front_alsa_init(front_info);
}

static void sndback_disconnect(struct xen_snd_front_info *front_info)
{
        xen_snd_drv_fini(front_info);
        xenbus_switch_state(front_info->xb_dev, XenbusStateInitialising);
}

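/*
 * XenBus callback: the backend has changed state. Drive the frontend's
 * side of the connection state machine accordingly.
 */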
static void sndback_changed(struct xenbus_device *xb_dev,
                            enum xenbus_state backend_state)
{
        struct xen_snd_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
        int ret;

        dev_dbg(&xb_dev->dev, "Backend state is %s, front is %s\n",
                xenbus_strstate(backend_state),
                xenbus_strstate(xb_dev->state));

        switch (backend_state) {
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
        case XenbusStateInitialised:
                break;

        case XenbusStateInitialising:
                /* Recovering after an unexpected backend closure. */
                sndback_disconnect(front_info);
                break;

        case XenbusStateInitWait:
                /* Recovering after an unexpected backend closure. */
                sndback_disconnect(front_info);

                ret = sndback_initwait(front_info);
                if (ret < 0)
                        xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
                else
                        xenbus_switch_state(xb_dev, XenbusStateInitialised);
                break;

        case XenbusStateConnected:
                if (xb_dev->state != XenbusStateInitialised)
                        break;

                ret = sndback_connect(front_info);
                if (ret < 0)
                        xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
                else
                        xenbus_switch_state(xb_dev, XenbusStateConnected);
                break;

        case XenbusStateClosing:
                /*
                 * In this state the backend starts freeing its resources:
                 * let it reach the Closed state first, then we can remove
                 * ours as well.
                 */
                break;

        case XenbusStateUnknown:
        case XenbusStateClosed:
                if (xb_dev->state == XenbusStateClosed)
                        break;

                sndback_disconnect(front_info);
                break;
        }
}

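/*
 * Allocate per-device driver data and announce the frontend by switching
 * to the Initialising state; the rest of the setup happens from
 * sndback_changed() as the backend advances through its states.
 */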
static int xen_drv_probe(struct xenbus_device *xb_dev,
                         const struct xenbus_device_id *id)
{
        struct xen_snd_front_info *front_info;

        front_info = devm_kzalloc(&xb_dev->dev,
                                  sizeof(*front_info), GFP_KERNEL);
        if (!front_info)
                return -ENOMEM;

        front_info->xb_dev = xb_dev;
        dev_set_drvdata(&xb_dev->dev, front_info);

        return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
        struct xen_snd_front_info *front_info = dev_get_drvdata(&dev->dev);
        int to = 100;

        xenbus_switch_state(dev, XenbusStateClosing);

        /*
         * On driver removal the device is disconnected from XenBus, so no
         * backend state change events come via the .otherend_changed
         * callback. This prevents us from exiting gracefully, e.g.
         * signaling the backend to free event channels, waiting for its
         * state to change to XenbusStateClosed and cleaning up on our end.
         * Normally, when the front driver is removed, the backend will
         * finally go into the XenbusStateInitWait state.
         *
         * Workaround: read the backend's state manually and wait with a
         * timeout.
         */
        while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
                                     XenbusStateUnknown) != XenbusStateInitWait) &&
               --to)
                msleep(10);

        if (!to) {
                unsigned int state;

                state = xenbus_read_unsigned(front_info->xb_dev->otherend,
                                             "state", XenbusStateUnknown);
                pr_err("Backend state is %s while removing driver\n",
                       xenbus_strstate(state));
        }

        xen_snd_drv_fini(front_info);
        xenbus_frontend_closed(dev);
        return 0;
}

static const struct xenbus_device_id xen_drv_ids[] = {
        { XENSND_DRIVER_NAME },
        { "" }
};

static struct xenbus_driver xen_driver = {
        .ids = xen_drv_ids,
        .probe = xen_drv_probe,
        .remove = xen_drv_remove,
        .otherend_changed = sndback_changed,
};

static int __init xen_drv_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        if (!xen_has_pv_devices())
                return -ENODEV;

        /* At the moment we only support XEN_PAGE_SIZE == PAGE_SIZE. */
        if (XEN_PAGE_SIZE != PAGE_SIZE) {
                pr_err(XENSND_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
                       XEN_PAGE_SIZE, PAGE_SIZE);
                return -ENODEV;
        }

        pr_info("Initialising Xen " XENSND_DRIVER_NAME " frontend driver\n");
        return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
        pr_info("Unregistering Xen " XENSND_DRIVER_NAME " frontend driver\n");
        xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen virtual sound device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENSND_DRIVER_NAME);
MODULE_SUPPORTED_DEVICE("{{ALSA,Virtual soundcard}}");