linux/drivers/usb/usbip/stub_tx.c
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

#include <linux/kthread.h>
#include <linux/socket.h>

#include "usbip_common.h"
#include "stub.h"

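/*
 * Free a completed request: release the urb's setup packet and transfer
 * buffer, unlink the stub_priv from its list, return it to the slab cache
 * and drop the urb reference.  Called with sdev->priv_lock held.
 */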
static void stub_free_priv_and_urb(struct stub_priv *priv)
{
        struct urb *urb = priv->urb;

        kfree(urb->setup_packet);
        kfree(urb->transfer_buffer);
        list_del(&priv->list);
        kmem_cache_free(stub_priv_cache, priv);
        usb_free_urb(urb);
}

/* caller must hold sdev->priv_lock (spin_lock_irqsave) */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
                             __u32 status)
{
        struct stub_unlink *unlink;

        unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
        if (!unlink) {
                usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
                return;
        }

        unlink->seqnum = seqnum;
        unlink->status = status;

        list_add_tail(&unlink->list, &sdev->unlink_tx);
}

/**
 * stub_complete - completion handler of a usbip urb
 * @urb: pointer to the completed urb
 *
 * When a urb completes, the USB core calls this function, usually in
 * interrupt context. To return the result of the urb to the client, the
 * completed urb is linked to the list of urbs pending transmission and the
 * tx thread is woken up.
 */
void stub_complete(struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;
        struct stub_device *sdev = priv->sdev;
        unsigned long flags;

        usbip_dbg_stub_tx("complete! status %d\n", urb->status);

        switch (urb->status) {
        case 0:
                /* OK */
                break;
        case -ENOENT:
                dev_info(&urb->dev->dev,
                         "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
                return;
        case -ECONNRESET:
                dev_info(&urb->dev->dev,
                         "unlinked by a call to usb_unlink_urb()\n");
                break;
        case -EPIPE:
                dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
                         usb_pipeendpoint(urb->pipe));
                break;
        case -ESHUTDOWN:
                dev_info(&urb->dev->dev, "device removed?\n");
                break;
        default:
                dev_info(&urb->dev->dev,
                         "urb completion with non-zero status %d\n",
                         urb->status);
                break;
        }

        /* link the urb to the tx queue. */
        spin_lock_irqsave(&sdev->priv_lock, flags);
        if (sdev->ud.tcp_socket == NULL) {
                usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
                /* It will be freed in stub_device_cleanup_urbs(). */
        } else if (priv->unlinking) {
                stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
                stub_free_priv_and_urb(priv);
        } else {
                list_move_tail(&priv->list, &sdev->priv_tx);
        }
        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        /* wake up tx_thread */
        wake_up(&sdev->tx_waitq);
}

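/*
 * Fill in the header fields common to all reply PDUs; devid, ep and
 * direction are not used in replies and are cleared.
 */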
static inline void setup_base_pdu(struct usbip_header_basic *base,
                                  __u32 command, __u32 seqnum)
{
        base->command   = command;
        base->seqnum    = seqnum;
        base->devid     = 0;
        base->ep        = 0;
        base->direction = 0;
}

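/* Build the RET_SUBMIT header for a completed urb. */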
static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;

        setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
        usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}

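/* Build the RET_UNLINK header carrying the status of an unlink request. */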
static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
                                 struct stub_unlink *unlink)
{
        setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
        rpdu->u.ret_unlink.status = unlink->status;
}

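/*
 * Take the first completed request off priv_tx, moving it to priv_free so
 * it can be released after transmission.  Returns NULL if priv_tx is empty.
 */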
static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
        unsigned long flags;
        struct stub_priv *priv, *tmp;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
                list_move_tail(&priv->list, &sdev->priv_free);
                spin_unlock_irqrestore(&sdev->priv_lock, flags);
                return priv;
        }

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return NULL;
}

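/*
 * Send a RET_SUBMIT PDU for every completed urb queued on priv_tx.  For a
 * non-isochronous IN transfer the received data follows the header; for an
 * isochronous IN urb each packet's payload is sent without the inter-packet
 * padding, and the iso packet descriptors are appended for any isochronous
 * urb.  Returns the total number of bytes sent, or -1 after signalling an
 * error event.
 */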
static int stub_send_ret_submit(struct stub_device *sdev)
{
        unsigned long flags;
        struct stub_priv *priv, *tmp;

        struct msghdr msg;
        size_t txsize;

        size_t total_size = 0;

        while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
                int ret;
                struct urb *urb = priv->urb;
                struct usbip_header pdu_header;
                struct usbip_iso_packet_descriptor *iso_buffer = NULL;
                struct kvec *iov = NULL;
                int iovnum = 0;

                txsize = 0;
                memset(&pdu_header, 0, sizeof(pdu_header));
                memset(&msg, 0, sizeof(msg));

                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
                        iovnum = 2 + urb->number_of_packets;
                else
                        iovnum = 2;

                iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);

                if (!iov) {
                        usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
                        return -1;
                }

                iovnum = 0;

                /* 1. setup usbip_header */
                setup_ret_submit_pdu(&pdu_header, urb);
                usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
                                  pdu_header.base.seqnum, urb);
                usbip_header_correct_endian(&pdu_header, 1);

                iov[iovnum].iov_base = &pdu_header;
                iov[iovnum].iov_len  = sizeof(pdu_header);
                iovnum++;
                txsize += sizeof(pdu_header);

                /* 2. setup transfer buffer */
                if (usb_pipein(urb->pipe) &&
                    usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
                    urb->actual_length > 0) {
                        iov[iovnum].iov_base = urb->transfer_buffer;
                        iov[iovnum].iov_len  = urb->actual_length;
                        iovnum++;
                        txsize += urb->actual_length;
                } else if (usb_pipein(urb->pipe) &&
                           usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        /*
                         * For isochronous packets: actual_length is the sum
                         * of the actual lengths of the individual packets,
                         * but as the packet offsets are not changed there
                         * will be padding between the packets. To use the
                         * bandwidth optimally, the padding is not
                         * transmitted.
                         */

                        int i;

                        for (i = 0; i < urb->number_of_packets; i++) {
                                iov[iovnum].iov_base = urb->transfer_buffer +
                                        urb->iso_frame_desc[i].offset;
                                iov[iovnum].iov_len =
                                        urb->iso_frame_desc[i].actual_length;
                                iovnum++;
                                txsize += urb->iso_frame_desc[i].actual_length;
                        }

                        if (txsize != sizeof(pdu_header) + urb->actual_length) {
                                dev_err(&sdev->udev->dev,
                                        "actual length of urb %d does not match iso packet sizes %zu\n",
                                        urb->actual_length,
                                        txsize - sizeof(pdu_header));
                                kfree(iov);
                                usbip_event_add(&sdev->ud,
                                                SDEV_EVENT_ERROR_TCP);
                                return -1;
                        }
                }

                /* 3. setup iso_packet_descriptor */
                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                        ssize_t len = 0;

                        iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
                        if (!iso_buffer) {
                                usbip_event_add(&sdev->ud,
                                                SDEV_EVENT_ERROR_MALLOC);
                                kfree(iov);
                                return -1;
                        }

                        iov[iovnum].iov_base = iso_buffer;
                        iov[iovnum].iov_len  = len;
                        txsize += len;
                        iovnum++;
                }

                ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
                                     iov, iovnum, txsize);
                if (ret != txsize) {
                        dev_err(&sdev->udev->dev,
                                "sendmsg failed!, retval %d for %zd\n",
                                ret, txsize);
                        kfree(iov);
                        kfree(iso_buffer);
                        usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
                        return -1;
                }

                kfree(iov);
                kfree(iso_buffer);

                total_size += txsize;
        }

        spin_lock_irqsave(&sdev->priv_lock, flags);
        list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
                stub_free_priv_and_urb(priv);
        }
        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return total_size;
}

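/*
 * Take the first pending unlink reply off unlink_tx, moving it to
 * unlink_free for later release.  Returns NULL if unlink_tx is empty.
 */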
static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
        unsigned long flags;
        struct stub_unlink *unlink, *tmp;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
                list_move_tail(&unlink->list, &sdev->unlink_free);
                spin_unlock_irqrestore(&sdev->priv_lock, flags);
                return unlink;
        }

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return NULL;
}

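/*
 * Send a RET_UNLINK PDU for every reply queued on unlink_tx, then release
 * the entries collected on unlink_free.  Returns the total number of bytes
 * sent, or -1 if sendmsg fails.
 */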
static int stub_send_ret_unlink(struct stub_device *sdev)
{
        unsigned long flags;
        struct stub_unlink *unlink, *tmp;

        struct msghdr msg;
        struct kvec iov[1];
        size_t txsize;

        size_t total_size = 0;

        while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
                int ret;
                struct usbip_header pdu_header;

                txsize = 0;
                memset(&pdu_header, 0, sizeof(pdu_header));
                memset(&msg, 0, sizeof(msg));
                memset(&iov, 0, sizeof(iov));

                usbip_dbg_stub_tx("setup ret unlink %lu\n", unlink->seqnum);

                /* 1. setup usbip_header */
                setup_ret_unlink_pdu(&pdu_header, unlink);
                usbip_header_correct_endian(&pdu_header, 1);

                iov[0].iov_base = &pdu_header;
                iov[0].iov_len  = sizeof(pdu_header);
                txsize += sizeof(pdu_header);

                ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
                                     1, txsize);
                if (ret != txsize) {
                        dev_err(&sdev->udev->dev,
                                "sendmsg failed!, retval %d for %zd\n",
                                ret, txsize);
                        usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
                        return -1;
                }

                usbip_dbg_stub_tx("send txdata\n");
                total_size += txsize;
        }

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
                list_del(&unlink->list);
                kfree(unlink);
        }

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return total_size;
}

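/*
 * Main loop of the tx kernel thread: send back completed submissions and
 * unlink replies until the device is stopped or an error event occurs,
 * sleeping on tx_waitq while both queues are empty.
 */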
int stub_tx_loop(void *data)
{
        struct usbip_device *ud = data;
        struct stub_device *sdev = container_of(ud, struct stub_device, ud);

        while (!kthread_should_stop()) {
                if (usbip_event_happened(ud))
                        break;

                /*
                 * send_ret_submit comes earlier than send_ret_unlink.  stub_rx
                 * looks only at the priv_init queue. If the completion of a
                 * URB comes earlier than the receipt of CMD_UNLINK, priv is
                 * moved to the priv_tx queue and stub_rx does not find the
                 * target priv. In this case, vhci_rx receives the result of
                 * the submit request and then the result of the unlink
                 * request. The result of the submit is given back to the USB
                 * core as the completion of the unlink request, and the
                 * unlink reply itself is ignored. This is OK because a driver
                 * that calls usb_unlink_urb() can tell the unlink was too
                 * late from the status of the given-back URB, which carries
                 * the result of usb_submit_urb().
                 */
                if (stub_send_ret_submit(sdev) < 0)
                        break;

                if (stub_send_ret_unlink(sdev) < 0)
                        break;

                wait_event_interruptible(sdev->tx_waitq,
                                         (!list_empty(&sdev->priv_tx) ||
                                          !list_empty(&sdev->unlink_tx) ||
                                          kthread_should_stop()));
        }

        return 0;
}