linux/drivers/usb/gadget/udc/aspeed-vhub/epn.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * epn.c - Generic endpoints management
 *
 * Copyright 2017 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

#define EXTRA_CHECKS

#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...)                                 \
        do {                                                    \
                if (!(expr))                                    \
                        EPDBG(ep, "CHECK:" fmt);                \
        } while (0)
#else
#define CHECK(ep, expr, fmt...) do { } while (0)
#endif
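
/*
 * Illustrative usage: CHECK(ep, cond, "mismatch %d\n", v) logs the
 * message via EPDBG() when "cond" is false, and compiles to nothing
 * when EXTRA_CHECKS is not defined.
 */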

static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
        unsigned int act = req->req.actual;
        unsigned int len = req->req.length;
        unsigned int chunk;

        /* There should be no DMA ongoing */
        WARN_ON(req->active);

        /* Calculate next chunk size */
        chunk = len - act;
        if (chunk > ep->ep.maxpacket)
                chunk = ep->ep.maxpacket;
        else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
                req->last_desc = 1;
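
        /*
         * Worked example (values assumed for illustration): for a
         * 1000-byte IN request on a 512-byte bulk endpoint, the first
         * kick sends 512 bytes and the second sends 488; since
         * 488 < maxpacket, last_desc is set and no trailing
         * zero-length packet is needed.
         */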

        EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
               req, act, len, chunk, req->last_desc);

        /* If DMA is unavailable, use the staging EP buffer */
        if (!req->req.dma) {

                /* For IN transfers, copy data over first */
                if (ep->epn.is_in) {
                        memcpy(ep->buf, req->req.buf + act, chunk);
                        vhub_dma_workaround(ep->buf);
                }
                writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
        } else {
                if (ep->epn.is_in)
                        vhub_dma_workaround(req->req.buf);
                writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
        }

        /* Start DMA */
        req->active = true;
        writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
        writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}

static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
        struct ast_vhub_req *req;
        unsigned int len;
        u32 stat;

        /* Read EP status */
        stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

        /* Grab current request if any */
        req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

        EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
               stat, ep->epn.is_in, req, req ? req->active : 0);

        /* In the absence of a request, bail out: it must have been dequeued */
        if (!req)
                return;

        /*
         * If the request isn't active, the previously active request
         * was probably dequeued; move on to processing the queue.
         */
        if (!req->active)
                goto next_chunk;

        /* Check if HW has moved on */
        if (VHUB_EP_DMA_RPTR(stat) != 0) {
                EPDBG(ep, "DMA read pointer not 0 !\n");
                return;
        }

        /* No current DMA ongoing */
        req->active = false;

        /* Grab length out of HW */
        len = VHUB_EP_DMA_TX_SIZE(stat);

        /* If not using DMA, copy data out if needed */
        if (!req->req.dma && !ep->epn.is_in && len)
                memcpy(req->req.buf + req->req.actual, ep->buf, len);

        /* Adjust size */
        req->req.actual += len;

        /* Check for short packet */
        if (len < ep->ep.maxpacket)
                req->last_desc = 1;

        /* That's it? Complete the request and pick a new one */
        if (req->last_desc >= 0) {
                ast_vhub_done(ep, req, 0);
                req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
                                               queue);

                /*
                 * Due to lock dropping inside "done" the next request could
                 * already be active, so check for that and bail if needed.
                 */
                if (!req || req->active)
                        return;
        }

 next_chunk:
        ast_vhub_epn_kick(ep, req);
}

static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
        /*
         * d_next == d_last means the descriptor list is empty to the HW,
         * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
         * in the list
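         *
         * Worked example (assuming AST_VHUB_DESCS_COUNT == 256):
         * d_next == 5 and d_last == 3 gives
         * (3 + 256 - 5 - 1) & 255 == 253 free slots; one slot always
         * stays unused so the HW can tell full from empty.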
         */
        return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
                (AST_VHUB_DESCS_COUNT - 1);
}

static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
                                   struct ast_vhub_req *req)
{
        struct ast_vhub_desc *desc = NULL;
        unsigned int act = req->act_count;
        unsigned int len = req->req.length;
        unsigned int chunk;

        /* Mark request active if not already */
        req->active = true;

        /* If the request was already completely written, do nothing */
        if (req->last_desc >= 0)
                return;

        EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
               act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));

        /* While we can create descriptors */
        while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
                unsigned int d_num;

                /* Grab next free descriptor */
                d_num = ep->epn.d_next;
                desc = &ep->epn.descs[d_num];
                ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

                /* Calculate next chunk size */
                chunk = len - act;
                if (chunk <= ep->epn.chunk_max) {
                        /*
                         * Is this the last packet? Because a descriptor can
                         * carry up to 8 packets, we can't just compare "chunk"
                         * with ep.maxpacket; we have to check whether it's a
                         * multiple of it to know if we need to send a zero
                         * packet. Sadly that involves a modulo, which is a bit
                         * expensive, but probably still better than not doing
                         * it.
                         */
                        if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
                                req->last_desc = d_num;
                } else {
                        chunk = ep->epn.chunk_max;
                }
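
                /*
                 * Worked example (values assumed for illustration):
                 * maxpacket == 512, req.zero set, length == 1024. The
                 * first pass queues a 1024-byte descriptor (an exact
                 * multiple of maxpacket, so not last); the second pass
                 * sees chunk == 0 and queues a zero-length descriptor
                 * marked as the last one.
                 */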

                EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
                       act, len, chunk, req->last_desc, d_num,
                       ast_vhub_count_free_descs(ep));

                /* Populate descriptor */
                desc->w0 = cpu_to_le32(req->req.dma + act);

                /* Interrupt if end of request or no more descriptors */

                /*
                 * TODO: Be smarter about it, if we don't have enough
                 * descriptors request an interrupt before queue empty
                 * or so in order to be able to populate more before
                 * the HW runs out. This isn't a problem at the moment
                 * as we use 256 descriptors and only put at most one
                 * request in the ring.
                 */
                desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
                if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
                        desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

                /* Account packet */
                req->act_count = act = act + chunk;
        }

        if (likely(desc))
                vhub_dma_workaround(desc);

        /* Tell HW about new descriptors */
        writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

        EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
               ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}

static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
        struct ast_vhub_req *req;
        unsigned int len, d_last;
        u32 stat, stat1;

        /* Read EP status, work around HW race */
        do {
                stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
                stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
        } while (stat != stat1);

        /* Extract RPTR */
        d_last = VHUB_EP_DMA_RPTR(stat);

        /* Grab current request if any */
        req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

        EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
               stat, ep->epn.is_in, ep->epn.d_last, d_last);

        /* Check all completed descriptors */
        while (ep->epn.d_last != d_last) {
                struct ast_vhub_desc *desc;
                unsigned int d_num;
                bool is_last_desc;

                /* Grab next completed descriptor */
                d_num = ep->epn.d_last;
                desc = &ep->epn.descs[d_num];
                ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

                /* Grab len out of descriptor */
                len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

                EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
                       d_num, len, req, req ? req->active : 0);

                /* If no active request pending, move on */
                if (!req || !req->active)
                        continue;

                /* Adjust size */
                req->req.actual += len;

                /* Is that the last chunk? */
                is_last_desc = req->last_desc == d_num;
                CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
                                           (req->req.actual >= req->req.length &&
                                            !req->req.zero)),
                      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
                      "r.len=%d r.zero=%d mp=%d\n",
                      is_last_desc, len, req->req.actual, req->req.length,
                      req->req.zero, ep->ep.maxpacket);

                if (is_last_desc) {
                        /*
                         * Because we can only have one request at a time
                         * in our descriptor list in this implementation,
                         * d_last and ep->d_last should now be equal
                         */
                        CHECK(ep, d_last == ep->epn.d_last,
                              "DMA read ptr mismatch %d vs %d\n",
                              d_last, ep->epn.d_last);

                        /* Note: done will drop and re-acquire the lock */
                        ast_vhub_done(ep, req, 0);
                        req = list_first_entry_or_null(&ep->queue,
                                                       struct ast_vhub_req,
                                                       queue);
                        break;
                }
        }

        /* More work? */
        if (req)
                ast_vhub_epn_kick_desc(ep, req);
}

void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
        if (ep->epn.desc_mode)
                ast_vhub_epn_handle_ack_desc(ep);
        else
                ast_vhub_epn_handle_ack(ep);
}

static int ast_vhub_epn_queue(struct usb_ep *u_ep, struct usb_request *u_req,
                              gfp_t gfp_flags)
{
        struct ast_vhub_req *req = to_ast_req(u_req);
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        unsigned long flags;
        bool empty;
        int rc;

        /* Paranoid checks */
        if (!u_req || !u_req->complete || !u_req->buf) {
                dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
                if (u_req) {
                        dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
                                 u_req->complete, req->internal);
                }
                return -EINVAL;
        }

        /* Endpoint enabled? */
        if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
            !ep->dev->enabled) {
                EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
                return -ESHUTDOWN;
        }

        /* Map the request for DMA if possible. For now, the rule for DMA
         * is that:
         *
         *  * For single stage mode (no descriptors):
         *
         *   - The buffer is aligned to an 8-byte boundary (HW requirement)
         *   - For an OUT endpoint, the request size is a multiple of the EP
         *     packet size (otherwise the controller will DMA past the end
         *     of the buffer if the host sends a packet that is too long).
         *
         *  * For descriptor mode (tx only for now), always.
         *
         * We could relax the latter by making the decision to use the bounce
         * buffer based on the size of a given *segment* of the request rather
         * than the whole request.
         */
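        /*
         * For instance (illustrative values): an 8-byte-aligned,
         * 512-byte OUT request on a 512-byte endpoint is mapped for
         * DMA, while a 100-byte OUT request on the same endpoint goes
         * through the bounce buffer, since the controller could DMA
         * past the end of a short, non-multiple buffer.
         */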
        if (ep->epn.desc_mode ||
            ((((unsigned long)u_req->buf & 7) == 0) &&
             (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
                rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
                                                   ep->epn.is_in);
                if (rc) {
                        dev_warn(&vhub->pdev->dev,
                                 "Request mapping failure %d\n", rc);
                        return rc;
                }
        } else {
                u_req->dma = 0;
        }

        EPVDBG(ep, "enqueue req @%p\n", req);
        EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
               u_req->length, (u32)u_req->dma, u_req->zero,
               u_req->short_not_ok, u_req->no_interrupt,
               ep->epn.is_in);

        /* Initialize request progress fields */
        u_req->status = -EINPROGRESS;
        u_req->actual = 0;
        req->act_count = 0;
        req->active = false;
        req->last_desc = -1;
        spin_lock_irqsave(&vhub->lock, flags);
        empty = list_empty(&ep->queue);

        /* Add request to list and kick processing if empty */
        list_add_tail(&req->queue, &ep->queue);
        if (empty) {
                if (ep->epn.desc_mode)
                        ast_vhub_epn_kick_desc(ep, req);
                else
                        ast_vhub_epn_kick(ep, req);
        }
        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
                                     bool restart_ep)
{
        u32 state, reg, loops;

        /* Stop DMA activity */
        if (ep->epn.desc_mode)
                writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
        else
                writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

        /* Wait for it to complete */
        for (loops = 0; loops < 1000; loops++) {
                state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
                state = VHUB_EP_DMA_PROC_STATUS(state);
                if (state == EP_DMA_PROC_RX_IDLE ||
                    state == EP_DMA_PROC_TX_IDLE)
                        break;
                udelay(1);
        }
        if (loops >= 1000)
                dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");

        /* If we don't have to restart the endpoint, that's it */
        if (!restart_ep)
                return;

        /* Restart the endpoint */
        if (ep->epn.desc_mode) {
                /*
                 * Take out the descriptors by resetting the DMA read
                 * pointer to be equal to the CPU write pointer.
                 *
                 * Note: If we ever support creating descriptors for
                 * requests that aren't the head of the queue, we
                 * may have to do something more complex here,
                 * especially if the request being taken out is
                 * not at the head of the descriptor list.
                 */
                reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
                        VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
                writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

                /* Then turn it back on */
                writel(ep->epn.dma_conf,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
        } else {
                /* Single mode: just turn it back on */
                writel(ep->epn.dma_conf,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
        }
}

static int ast_vhub_epn_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        struct ast_vhub_req *req;
        unsigned long flags;
        int rc = -EINVAL;

        spin_lock_irqsave(&vhub->lock, flags);

        /* Make sure it's actually queued on this endpoint */
        list_for_each_entry(req, &ep->queue, queue) {
                if (&req->req == u_req)
                        break;
        }

        if (&req->req == u_req) {
                EPVDBG(ep, "dequeue req @%p active=%d\n",
                       req, req->active);
                if (req->active)
                        ast_vhub_stop_active_req(ep, true);
                ast_vhub_done(ep, req, -ECONNRESET);
                rc = 0;
        }

        spin_unlock_irqrestore(&vhub->lock, flags);
        return rc;
}

void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
        u32 reg;

        if (WARN_ON(ep->d_idx == 0))
                return;
        reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
        if (ep->epn.stalled || ep->epn.wedged)
                reg |= VHUB_EP_CFG_STALL_CTRL;
        else
                reg &= ~VHUB_EP_CFG_STALL_CTRL;
        writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);

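        /*
         * When the halt condition is cleared, the data toggle must
         * restart at DATA0 (per the USB 2.0 spec), so reset it in HW
         * as well.
         */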
        if (!ep->epn.stalled && !ep->epn.wedged)
                writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
                       ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}

static int ast_vhub_set_halt_and_wedge(struct usb_ep *u_ep, bool halt,
                                       bool wedge)
{
        struct ast_vhub_ep *ep;
        struct ast_vhub *vhub;
        unsigned long flags;

        /* Check arguments before dereferencing anything */
        if (!u_ep || !u_ep->desc)
                return -EINVAL;

        ep = to_ast_ep(u_ep);
        vhub = ep->vhub;

        EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);

        if (ep->d_idx == 0)
                return 0;
        if (ep->epn.is_iso)
                return -EOPNOTSUPP;

        spin_lock_irqsave(&vhub->lock, flags);

        /* Fail with still-busy IN endpoints */
        if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
                spin_unlock_irqrestore(&vhub->lock, flags);
                return -EAGAIN;
        }
        ep->epn.stalled = halt;
        ep->epn.wedged = wedge;
        ast_vhub_update_epn_stall(ep);

        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
        return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}

static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
        return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}

static int ast_vhub_epn_disable(struct usb_ep *u_ep)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        unsigned long flags;
        u32 imask, ep_ier;

        EPDBG(ep, "Disabling !\n");

        spin_lock_irqsave(&vhub->lock, flags);

        ep->epn.enabled = false;

        /* Stop active DMA if any */
        ast_vhub_stop_active_req(ep, false);

        /* Disable endpoint */
        writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

        /* Disable ACK interrupt */
        imask = VHUB_EP_IRQ(ep->epn.g_idx);
        ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
        ep_ier &= ~imask;
        writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
        writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);

        /* Nuke all pending requests */
        ast_vhub_nuke(ep, -ESHUTDOWN);

        /* No more USB descriptor associated with the endpoint */
        ep->ep.desc = NULL;

        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static int ast_vhub_epn_enable(struct usb_ep *u_ep,
                               const struct usb_endpoint_descriptor *desc)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub_dev *dev;
        struct ast_vhub *vhub;
        u16 maxpacket, type;
        unsigned long flags;
        u32 ep_conf, ep_ier, imask;

        /* Check arguments */
        if (!u_ep || !desc)
                return -EINVAL;

        maxpacket = usb_endpoint_maxp(desc);
        if (!ep->d_idx || !ep->dev ||
            desc->bDescriptorType != USB_DT_ENDPOINT ||
            maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
                EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
                      ep->d_idx, ep->dev, desc->bDescriptorType,
                      maxpacket, ep->ep.maxpacket);
                return -EINVAL;
        }
        if (ep->d_idx != usb_endpoint_num(desc)) {
                EPDBG(ep, "EP number mismatch !\n");
                return -EINVAL;
        }

        if (ep->epn.enabled) {
                EPDBG(ep, "Already enabled\n");
                return -EBUSY;
        }
        dev = ep->dev;
        vhub = ep->vhub;

        /* Check device state */
        if (!dev->driver) {
                EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
                       dev->driver, dev->gadget.speed);
                return -ESHUTDOWN;
        }

        /* Grab some info from the descriptor */
        ep->epn.is_in = usb_endpoint_dir_in(desc);
        ep->ep.maxpacket = maxpacket;
        type = usb_endpoint_type(desc);
        ep->epn.d_next = ep->epn.d_last = 0;
        ep->epn.is_iso = false;
        ep->epn.stalled = false;
        ep->epn.wedged = false;

        EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
              ep->epn.is_in ? "in" : "out", usb_ep_type_string(type),
              usb_endpoint_num(desc), maxpacket);

        /* Can we use DMA descriptor mode? */
        ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
        if (ep->epn.desc_mode)
                memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);

        /*
         * The large-send function can send up to 8 packets from one
         * descriptor, with a limit of 4095 bytes.
         */
        ep->epn.chunk_max = ep->ep.maxpacket;
        if (ep->epn.is_in) {
                ep->epn.chunk_max <<= 3;
                while (ep->epn.chunk_max > 4095)
                        ep->epn.chunk_max -= ep->ep.maxpacket;
        }
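
        /*
         * Example (assuming a 512-byte bulk IN endpoint): 512 << 3 ==
         * 4096 exceeds 4095, so one maxpacket is subtracted, leaving
         * chunk_max at 3584 bytes, i.e. 7 packets per descriptor.
         */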

        switch (type) {
        case USB_ENDPOINT_XFER_CONTROL:
                EPDBG(ep, "Only one control endpoint\n");
                return -EINVAL;
        case USB_ENDPOINT_XFER_INT:
                ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
                break;
        case USB_ENDPOINT_XFER_BULK:
                ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
                ep->epn.is_iso = true;
                break;
        default:
                return -EINVAL;
        }

        /* Encode the rest of the EP config register */
        if (maxpacket < 1024)
                ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
        if (!ep->epn.is_in)
                ep_conf |= VHUB_EP_CFG_DIR_OUT;
        ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
        ep_conf |= VHUB_EP_CFG_ENABLE;
        ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
        EPVDBG(ep, "config=%08x\n", ep_conf);

        spin_lock_irqsave(&vhub->lock, flags);

        /* Disable HW and reset DMA */
        writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
        writel(VHUB_EP_DMA_CTRL_RESET,
               ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

        /* Configure and enable */
        writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);

        if (ep->epn.desc_mode) {
                /* Clear DMA status, including the DMA read ptr */
                writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

                /* Set descriptor base */
                writel(ep->epn.descs_dma,
                       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

                /* Set base DMA config value */
                ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
                if (ep->epn.is_in)
                        ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

                /* First reset and disable all operations */
                writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

                /* Enable descriptor mode */
                writel(ep->epn.dma_conf,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
        } else {
                /* Set base DMA config value */
                ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

                /* Reset and switch to single stage mode */
                writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
                writel(ep->epn.dma_conf,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
                writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
        }

        /* Clean up the data toggle just in case */
        writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
               vhub->regs + AST_VHUB_EP_TOGGLE);

        /* Clear any pending ACK interrupt and enable it */
        imask = VHUB_EP_IRQ(ep->epn.g_idx);
        writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
        ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
        ep_ier |= imask;
        writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

        /* Woot, we are online ! */
        ep->epn.enabled = true;

        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);

        if (WARN_ON(!ep->dev || !ep->d_idx))
                return;

        EPDBG(ep, "Releasing endpoint\n");

        /* Take it out of the EP list */
        list_del_init(&ep->ep.ep_list);

        /* Mark the address free in the device */
        ep->dev->epns[ep->d_idx - 1] = NULL;

        /* Free name & DMA buffers */
        kfree(ep->ep.name);
        ep->ep.name = NULL;
        dma_free_coherent(&ep->vhub->pdev->dev,
                          AST_VHUB_EPn_MAX_PACKET +
                          8 * AST_VHUB_DESCS_COUNT,
                          ep->buf, ep->buf_dma);
        ep->buf = NULL;
        ep->epn.descs = NULL;

        /* Mark free */
        ep->dev = NULL;
}

static const struct usb_ep_ops ast_vhub_epn_ops = {
        .enable         = ast_vhub_epn_enable,
        .disable        = ast_vhub_epn_disable,
        .dispose        = ast_vhub_epn_dispose,
        .queue          = ast_vhub_epn_queue,
        .dequeue        = ast_vhub_epn_dequeue,
        .set_halt       = ast_vhub_epn_set_halt,
        .set_wedge      = ast_vhub_epn_set_wedge,
        .alloc_request  = ast_vhub_alloc_request,
        .free_request   = ast_vhub_free_request,
};

struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
        struct ast_vhub *vhub = d->vhub;
        struct ast_vhub_ep *ep;
        unsigned long flags;
        int i;

        /* Find a free one (no device) */
        spin_lock_irqsave(&vhub->lock, flags);
        for (i = 0; i < vhub->max_epns; i++)
                if (vhub->epns[i].dev == NULL)
                        break;
        if (i >= vhub->max_epns) {
                spin_unlock_irqrestore(&vhub->lock, flags);
                return NULL;
        }

        /* Set it up */
        ep = &vhub->epns[i];
        ep->dev = d;
        spin_unlock_irqrestore(&vhub->lock, flags);

        DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
        INIT_LIST_HEAD(&ep->queue);
        ep->d_idx = addr;
        ep->vhub = vhub;
        ep->ep.ops = &ast_vhub_epn_ops;
        ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
        d->epns[addr - 1] = ep;
        ep->epn.g_idx = i;
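        /* Per-EP register banks start at offset 0x200, 0x10 bytes apart */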
        ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);

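        /*
         * One coherent allocation serves two purposes: the first
         * AST_VHUB_EPn_MAX_PACKET bytes are the bounce buffer for
         * single-stage mode, and the remaining
         * 8 * AST_VHUB_DESCS_COUNT bytes hold the descriptor ring
         * (each descriptor being two 32-bit words).
         */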
        ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
                                     AST_VHUB_EPn_MAX_PACKET +
                                     8 * AST_VHUB_DESCS_COUNT,
                                     &ep->buf_dma, GFP_KERNEL);
        if (!ep->buf) {
                kfree(ep->ep.name);
                ep->ep.name = NULL;
                return NULL;
        }
        ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
        ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;

        usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
        list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
        ep->ep.caps.type_iso = true;
        ep->ep.caps.type_bulk = true;
        ep->ep.caps.type_int = true;
        ep->ep.caps.dir_in = true;
        ep->ep.caps.dir_out = true;

        return ep;
}