linux/drivers/usb/usbip/stub_tx.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <linux/kthread.h>
#include <linux/socket.h>
#include <linux/scatterlist.h>

#include "usbip_common.h"
#include "stub.h"

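/*
 * Stub-side TX path: stub_tx_loop() sends USBIP_RET_SUBMIT replies for
 * completed URBs and USBIP_RET_UNLINK replies for unlink requests over the
 * connection's TCP socket.
 */
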
/* caller must hold sdev->priv_lock (taken with spin_lock_irqsave()) */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
			     __u32 status)
{
	struct stub_unlink *unlink;

	unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
	if (!unlink) {
		usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;

	list_add_tail(&unlink->list, &sdev->unlink_tx);
}
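
/*
 * The queued stub_unlink entries are consumed later by stub_send_ret_unlink()
 * on the tx thread and turned into USBIP_RET_UNLINK replies.
 */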

/**
 * stub_complete - completion handler of a usbip urb
 * @urb: pointer to the completed urb
 *
 * When a urb has completed, the USB core calls this function, mostly in
 * interrupt context. To return the result of the urb, the completed urb is
 * linked to the list of URBs pending return and the tx thread is woken up.
 */
void stub_complete(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	unsigned long flags;

	usbip_dbg_stub_tx("complete! status %d\n", urb->status);

	switch (urb->status) {
	case 0:
		/* OK */
		break;
	case -ENOENT:
		dev_info(&urb->dev->dev,
			 "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
		return;
	case -ECONNRESET:
		dev_info(&urb->dev->dev,
			 "unlinked by a call to usb_unlink_urb()\n");
		break;
	case -EPIPE:
		dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
			 usb_pipeendpoint(urb->pipe));
		break;
	case -ESHUTDOWN:
		dev_info(&urb->dev->dev, "device removed?\n");
		break;
	default:
		dev_info(&urb->dev->dev,
			 "urb completion with non-zero status %d\n",
			 urb->status);
		break;
	}

	/*
	 * If the server broke a single SG request into several URBs, the
	 * URBs must be reassembled before the completed result is sent back
	 * to the vhci.  Don't wake up the tx thread until all of the URBs
	 * have completed.
	 */
	if (priv->sgl) {
		priv->completed_urbs++;

		/* Only save the first error status */
		if (urb->status && !priv->urb_status)
			priv->urb_status = urb->status;

		if (priv->completed_urbs < priv->num_urbs)
			return;
	}

	/* link the urb to the tx queue */
	spin_lock_irqsave(&sdev->priv_lock, flags);
	if (sdev->ud.tcp_socket == NULL) {
		usbip_dbg_stub_tx("ignore urb for closed connection\n");
		/* It will be freed in stub_device_cleanup_urbs(). */
	} else if (priv->unlinking) {
		stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
		stub_free_priv_and_urb(priv);
	} else {
		list_move_tail(&priv->list, &sdev->priv_tx);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	/* wake up tx_thread */
	wake_up(&sdev->tx_waitq);
}

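/*
 * Only command and seqnum are meaningful in a reply header; the peer matches
 * returned PDUs by seqnum, so the remaining base fields are simply cleared.
 */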
static inline void setup_base_pdu(struct usbip_header_basic *base,
				  __u32 command, __u32 seqnum)
{
	base->command	= command;
	base->seqnum	= seqnum;
	base->devid	= 0;
	base->ep	= 0;
	base->direction = 0;
}

static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;

	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
	usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}

static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct stub_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}

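/*
 * Pop the first entry off priv_tx, if any, and park it on priv_free; the
 * entry is freed by stub_send_ret_submit() after the reply has been sent.
 * dequeue_from_unlink_tx() below follows the same pattern for the
 * unlink_tx/unlink_free lists.
 */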
static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
		list_move_tail(&priv->list, &sdev->priv_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return priv;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return NULL;
}

static int stub_send_ret_submit(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	struct msghdr msg;
	size_t txsize;

	size_t total_size = 0;

	while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
		struct urb *urb = priv->urbs[0];
		struct usbip_header pdu_header;
		struct usbip_iso_packet_descriptor *iso_buffer = NULL;
		struct kvec *iov = NULL;
		struct scatterlist *sg;
		u32 actual_length = 0;
		int iovnum = 0;
		int ret;
		int i;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));

		if (urb->actual_length > 0 && !urb->transfer_buffer &&
		    !urb->num_sgs) {
			dev_err(&sdev->udev->dev,
				"urb: actual_length %d transfer_buffer null\n",
				urb->actual_length);
			return -1;
		}

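		/*
		 * Worst-case number of kvec entries needed for this reply:
		 *   - isochronous pipe:     header + one entry per packet
		 *                           + the iso descriptor buffer
		 *   - IN with an SG table:  header + one entry per sg element
		 *   - IN split over URBs:   header + one entry per split URB
		 *   - everything else:      header + transfer buffer
		 */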
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			iovnum = 2 + urb->number_of_packets;
		else if (usb_pipein(urb->pipe) && urb->actual_length > 0 &&
			urb->num_sgs)
			iovnum = 1 + urb->num_sgs;
		else if (usb_pipein(urb->pipe) && priv->sgl)
			iovnum = 1 + priv->num_urbs;
		else
			iovnum = 2;

		iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);

		if (!iov) {
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
			return -1;
		}

		iovnum = 0;

		/* 1. setup usbip_header */
		setup_ret_submit_pdu(&pdu_header, urb);
		usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
				  pdu_header.base.seqnum);

		if (priv->sgl) {
			for (i = 0; i < priv->num_urbs; i++)
				actual_length += priv->urbs[i]->actual_length;

			pdu_header.u.ret_submit.status = priv->urb_status;
			pdu_header.u.ret_submit.actual_length = actual_length;
		}

		usbip_header_correct_endian(&pdu_header, 1);

		iov[iovnum].iov_base = &pdu_header;
		iov[iovnum].iov_len  = sizeof(pdu_header);
		iovnum++;
		txsize += sizeof(pdu_header);

		/* 2. setup transfer buffer */
		if (usb_pipein(urb->pipe) && priv->sgl) {
			/* If the server split a single SG request into several
			 * URBs because the server's HCD doesn't support SG,
			 * reassemble the split URB buffers into a single
			 * return command.
			 */
			for (i = 0; i < priv->num_urbs; i++) {
				iov[iovnum].iov_base =
					priv->urbs[i]->transfer_buffer;
				iov[iovnum].iov_len =
					priv->urbs[i]->actual_length;
				iovnum++;
			}
			txsize += actual_length;
		} else if (usb_pipein(urb->pipe) &&
		    usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
		    urb->actual_length > 0) {
			if (urb->num_sgs) {
				unsigned int copy = urb->actual_length;
				int size;

				for_each_sg(urb->sg, sg, urb->num_sgs, i) {
					if (copy == 0)
						break;

					if (copy < sg->length)
						size = copy;
					else
						size = sg->length;

					iov[iovnum].iov_base = sg_virt(sg);
					iov[iovnum].iov_len = size;

					iovnum++;
					copy -= size;
				}
			} else {
				iov[iovnum].iov_base = urb->transfer_buffer;
				iov[iovnum].iov_len  = urb->actual_length;
				iovnum++;
			}
			txsize += urb->actual_length;
		} else if (usb_pipein(urb->pipe) &&
			   usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			/*
			 * For isochronous packets the actual length is the
			 * sum of the actual lengths of the individual packets,
			 * but since the packet offsets are not changed there
			 * will be padding between the packets.  To use the
			 * bandwidth optimally, the padding is not transmitted.
			 */

			for (i = 0; i < urb->number_of_packets; i++) {
				iov[iovnum].iov_base = urb->transfer_buffer +
					urb->iso_frame_desc[i].offset;
				iov[iovnum].iov_len =
					urb->iso_frame_desc[i].actual_length;
				iovnum++;
				txsize += urb->iso_frame_desc[i].actual_length;
			}

			if (txsize != sizeof(pdu_header) + urb->actual_length) {
				dev_err(&sdev->udev->dev,
					"actual length of urb %d does not match iso packet sizes %zu\n",
					urb->actual_length,
					txsize - sizeof(pdu_header));
				kfree(iov);
				usbip_event_add(&sdev->ud,
						SDEV_EVENT_ERROR_TCP);
				return -1;
			}
		}

		/* 3. setup iso_packet_descriptor */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			ssize_t len = 0;

			iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
			if (!iso_buffer) {
				usbip_event_add(&sdev->ud,
						SDEV_EVENT_ERROR_MALLOC);
				kfree(iov);
				return -1;
			}

			iov[iovnum].iov_base = iso_buffer;
			iov[iovnum].iov_len  = len;
			txsize += len;
			iovnum++;
		}

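		/*
		 * Everything assembled above goes out in a single message:
		 * the usbip_header, any IN data, then the iso descriptors.
		 */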
		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
				     iov, iovnum, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed!, retval %d for %zd\n",
				ret, txsize);
			kfree(iov);
			kfree(iso_buffer);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		kfree(iov);
		kfree(iso_buffer);

		total_size += txsize;
	}

	spin_lock_irqsave(&sdev->priv_lock, flags);
	list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
		stub_free_priv_and_urb(priv);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}

static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
		list_move_tail(&unlink->list, &sdev->unlink_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return unlink;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return NULL;
}

static int stub_send_ret_unlink(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	struct msghdr msg;
	struct kvec iov[1];
	size_t txsize;

	size_t total_size = 0;

	while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
		int ret;
		struct usbip_header pdu_header;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(&iov, 0, sizeof(iov));

		usbip_dbg_stub_tx("setup ret unlink %lu\n", unlink->seqnum);

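		/*
		 * A RET_UNLINK reply is just the header; it carries no data
		 * payload.
		 */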
		/* 1. setup usbip_header */
		setup_ret_unlink_pdu(&pdu_header, unlink);
		usbip_header_correct_endian(&pdu_header, 1);

		iov[0].iov_base = &pdu_header;
		iov[0].iov_len  = sizeof(pdu_header);
		txsize += sizeof(pdu_header);

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
				     1, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed!, retval %d for %zd\n",
				ret, txsize);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		usbip_dbg_stub_tx("send txdata\n");
		total_size += txsize;
	}

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
		list_del(&unlink->list);
		kfree(unlink);
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}

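/*
 * Main tx thread: keep sending RET_SUBMIT and RET_UNLINK replies until a
 * usbip event (error or shutdown) is raised or the thread is asked to stop.
 */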
int stub_tx_loop(void *data)
{
	struct usbip_device *ud = data;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		/*
		 * send_ret_submit runs before send_ret_unlink.  stub_rx looks
		 * only at the priv_init queue, so if a URB completes before
		 * the corresponding CMD_UNLINK arrives, its priv has already
		 * been moved to the priv_tx queue and stub_rx does not find
		 * it.  In that case vhci_rx first receives the result of the
		 * submit request and then the result of the unlink request:
		 * the submit result completes the URB back to the USB core,
		 * and the unlink result is ignored.  This is fine because a
		 * driver that calls usb_unlink_urb() can tell the unlink came
		 * too late from the status of the returned URB, which carries
		 * the status of the original submission.
		 */
		if (stub_send_ret_submit(sdev) < 0)
			break;

		if (stub_send_ret_unlink(sdev) < 0)
			break;

		wait_event_interruptible(sdev->tx_waitq,
					 (!list_empty(&sdev->priv_tx) ||
					  !list_empty(&sdev->unlink_tx) ||
					  kthread_should_stop()));
	}

	return 0;
}