linux/drivers/usb/musb/musb_cppi41.c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE            0
#define EP_MODE_AUTOREQ_ALL_NEOP        1
#define EP_MODE_AUTOREQ_ALWAYS          3

#define EP_MODE_DMA_TRANSPARENT         0
#define EP_MODE_DMA_RNDIS               1
#define EP_MODE_DMA_GEN_RNDIS           3

#define USB_CTRL_TX_MODE        0x70
#define USB_CTRL_RX_MODE        0x74
#define USB_CTRL_AUTOREQ        0xd0
#define USB_TDOWN               0xd8

#define MUSB_DMA_NUM_CHANNELS 15

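/*
 * Per-controller state: the generic MUSB dma_controller, one channel slot
 * per hardware endpoint and direction, and the timer/list used by the
 * early-TX-interrupt workaround in cppi41_dma_callback().
 */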
struct cppi41_dma_controller {
        struct dma_controller controller;
        struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
        struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
        struct hrtimer early_tx;
        struct list_head early_tx_list;
        u32 rx_mode;
        u32 tx_mode;
        u32 auto_req;
};

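/*
 * Record the current RX data toggle so update_rx_toggle() can detect the
 * spurious DATA1 -> DATA0 reset described in AM335x Advisory 1.0.13.
 */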
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(cppi41_channel->controller->controller.musb))
                return;

        csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(musb))
                return;

        musb_ep_select(musb->mregs, hw_ep->epnum);
        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        /*
         * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
         * the data toggle may reset from DATA1 to DATA0 while receiving data
         * from more than one endpoint.
         */
        if (!toggle && toggle == cppi41_channel->usb_toggle) {
                csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
                musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
                musb_dbg(musb, "Restoring DATA1 toggle.");
        }

        cppi41_channel->usb_toggle = toggle;
}

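/* The TX FIFO is empty once the TXPKTRDY bit in TXCSR has cleared. */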
static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
        u8              epnum = hw_ep->epnum;
        struct musb     *musb = hw_ep->musb;
        void __iomem    *epio = musb->endpoints[epnum].regs;
        u16             csr;

        musb_ep_select(musb->mregs, hw_ep->epnum);
        csr = musb_readw(epio, MUSB_TXCSR);
        if (csr & MUSB_TXCSR_TXPKTRDY)
                return false;
        return true;
}

static void cppi41_dma_callback(void *private_data,
                                const struct dmaengine_result *result);

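/*
 * Per-completion bookkeeping: either report the finished transfer to the
 * MUSB core, or, in the one-packet-at-a-time RX mode mandated by AM335x
 * Advisory 1.0.13, queue the next packet-sized slave transfer.
 */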
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        void __iomem *epio = hw_ep->regs;
        u16 csr;

        if (!cppi41_channel->prog_len ||
            (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

                /* done, complete */
                cppi41_channel->channel.actual_len =
                        cppi41_channel->transferred;
                cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
                cppi41_channel->channel.rx_packet_done = true;

                /*
                 * Transmit a ZLP using PIO mode for transfers whose size is
                 * a multiple of the EP packet size.
                 */
                if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
                                        cppi41_channel->packet_sz) == 0) {
                        musb_ep_select(musb->mregs, hw_ep->epnum);
                        csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }

                trace_musb_cppi41_done(cppi41_channel);
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload */
                struct dma_chan *dc = cppi41_channel->dc;
                struct dma_async_tx_descriptor *dma_desc;
                enum dma_transfer_direction direction;
                u32 remain_bytes;

                cppi41_channel->buf_addr += cppi41_channel->packet_sz;

                remain_bytes = cppi41_channel->total_len;
                remain_bytes -= cppi41_channel->transferred;
                remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
                cppi41_channel->prog_len = remain_bytes;

                direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
                        : DMA_DEV_TO_MEM;
                dma_desc = dmaengine_prep_slave_single(dc,
                                cppi41_channel->buf_addr,
                                remain_bytes,
                                direction,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (WARN_ON(!dma_desc))
                        return;

                dma_desc->callback_result = cppi41_dma_callback;
                dma_desc->callback_param = &cppi41_channel->channel;
                cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
                trace_musb_cppi41_cont(cppi41_channel);
                dma_async_issue_pending(dc);

                if (!cppi41_channel->is_tx) {
                        musb_ep_select(musb->mregs, hw_ep->epnum);
                        csr = musb_readw(epio, MUSB_RXCSR);
                        csr |= MUSB_RXCSR_H_REQPKT;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }
}

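/*
 * hrtimer handler for the full-speed early-TX workaround: complete every
 * queued TX channel whose FIFO has drained, and re-arm the timer while any
 * channel is still waiting.
 */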
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
        struct cppi41_dma_controller *controller;
        struct cppi41_dma_channel *cppi41_channel, *n;
        struct musb *musb;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        controller = container_of(timer, struct cppi41_dma_controller,
                        early_tx);
        musb = controller->controller.musb;

        spin_lock_irqsave(&musb->lock, flags);
        list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
                        tx_check) {
                bool empty;
                struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

                empty = musb_is_tx_fifo_empty(hw_ep);
                if (empty) {
                        list_del_init(&cppi41_channel->tx_check);
                        cppi41_trans_done(cppi41_channel);
                }
        }

        if (!list_empty(&controller->early_tx_list) &&
            !hrtimer_is_queued(&controller->early_tx)) {
                ret = HRTIMER_RESTART;
                hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
        }

        spin_unlock_irqrestore(&musb->lock, flags);
        return ret;
}

static void cppi41_dma_callback(void *private_data,
                                const struct dmaengine_result *result)
{
        struct dma_channel *channel = private_data;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct cppi41_dma_controller *controller;
        struct musb *musb = hw_ep->musb;
        unsigned long flags;
        struct dma_tx_state txstate;
        u32 transferred;
        int is_hs = 0;
        bool empty;

        controller = cppi41_channel->controller;
        if (controller->controller.dma_callback)
                controller->controller.dma_callback(&controller->controller);

        if (result->result == DMA_TRANS_ABORTED)
                return;

        spin_lock_irqsave(&musb->lock, flags);

        dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
                        &txstate);
        transferred = cppi41_channel->prog_len - txstate.residue;
        cppi41_channel->transferred += transferred;

        trace_musb_cppi41_gb(cppi41_channel);
        update_rx_toggle(cppi41_channel);

        if (cppi41_channel->transferred == cppi41_channel->total_len ||
                        transferred < cppi41_channel->packet_sz)
                cppi41_channel->prog_len = 0;

        if (cppi41_channel->is_tx) {
                u8 type;

                if (is_host_active(musb))
                        type = hw_ep->out_qh->type;
                else
                        type = hw_ep->ep_in.type;

                if (type == USB_ENDPOINT_XFER_ISOC)
                        /*
                         * Don't use the early-TX-interrupt workaround below
                         * for isochronous transfers. Since isochronous
                         * transfers are periodic, by the time the next one
                         * is scheduled, the current one should be done
                         * already.
                         *
                         * This avoids an audio playback underrun issue.
                         */
                        empty = true;
                else
                        empty = musb_is_tx_fifo_empty(hw_ep);
        }

        if (!cppi41_channel->is_tx || empty) {
                cppi41_trans_done(cppi41_channel);
                goto out;
        }

        /*
         * On AM335x it has been observed that the TX interrupt fires too
         * early, i.e. the TXFIFO is not yet empty although the DMA engine
         * says it is done with the transfer. We don't receive a FIFO-empty
         * interrupt, so the only thing we can do is poll for the bit. On HS
         * it usually takes 2us, on FS around 110us - 150us depending on the
         * transfer size. We spin on HS (no longer than 25us) and set up a
         * timer on FS to check for the bit and complete the transfer.
         */
        if (is_host_active(musb)) {
                if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
                        is_hs = 1;
        } else {
                if (musb->g.speed == USB_SPEED_HIGH)
                        is_hs = 1;
        }
        if (is_hs) {
                unsigned wait = 25;

                do {
                        empty = musb_is_tx_fifo_empty(hw_ep);
                        if (empty) {
                                cppi41_trans_done(cppi41_channel);
                                goto out;
                        }
                        wait--;
                        if (!wait)
                                break;
                        cpu_relax();
                } while (1);
        }
        list_add_tail(&cppi41_channel->tx_check,
                        &controller->early_tx_list);
        if (!hrtimer_is_queued(&controller->early_tx)) {
                unsigned long usecs = cppi41_channel->total_len / 10;

                hrtimer_start_range_ns(&controller->early_tx,
                                       usecs * NSEC_PER_USEC,
                                       20 * NSEC_PER_USEC,
                                       HRTIMER_MODE_REL);
        }

out:
        spin_unlock_irqrestore(&musb->lock, flags);
}

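/*
 * The TX/RX mode and autoreq registers hold a 2-bit field per endpoint.
 * Replace endpoint ep's field in the cached register value 'old' with
 * 'mode', e.g. update_ep_mode(2, EP_MODE_DMA_GEN_RNDIS, 0) == 3 << 2 == 0xc.
 */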
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
        unsigned shift;

        shift = (ep - 1) * 2;
        old &= ~(3 << shift);
        old |= mode << shift;
        return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        if (cppi41_channel->is_tx)
                old_mode = controller->tx_mode;
        else
                old_mode = controller->rx_mode;
        port = cppi41_channel->port_num;
        new_mode = update_ep_mode(port, mode, old_mode);

        if (new_mode == old_mode)
                return;
        if (cppi41_channel->is_tx) {
                controller->tx_mode = new_mode;
                musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
        } else {
                controller->rx_mode = new_mode;
                musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
        }
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        old_mode = controller->auto_req;
        port = cppi41_channel->port_num;
        new_mode = update_ep_mode(port, mode, old_mode);

        if (new_mode == old_mode)
                return;
        controller->auto_req = new_mode;
        musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ,
                    new_mode);
}

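/*
 * Program one slave transfer. TX endpoints use generic RNDIS mode so the
 * whole buffer can be queued in one go; RX stays in transparent mode and is
 * limited to one packet per transfer (AM335x Advisory 1.0.13), with the
 * remainder reloaded packet by packet from cppi41_trans_done().
 */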
static bool cppi41_configure_channel(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct dma_chan *dc = cppi41_channel->dc;
        struct dma_async_tx_descriptor *dma_desc;
        enum dma_transfer_direction direction;
        struct musb *musb = cppi41_channel->controller->controller.musb;
        unsigned use_gen_rndis = 0;

        cppi41_channel->buf_addr = dma_addr;
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
        cppi41_channel->packet_sz = packet_sz;
        cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

        /*
         * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
         * than the max packet size at a time.
         */
        if (cppi41_channel->is_tx)
                use_gen_rndis = 1;

        if (use_gen_rndis) {
                /* RNDIS mode */
                if (len > packet_sz) {
                        musb_writel(musb->ctrl_base,
                                RNDIS_REG(cppi41_channel->port_num), len);
                        /* gen rndis */
                        cppi41_set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_GEN_RNDIS);

                        /* auto req */
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_ALL_NEOP);
                } else {
                        musb_writel(musb->ctrl_base,
                                        RNDIS_REG(cppi41_channel->port_num), 0);
                        cppi41_set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_TRANSPARENT);
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_NONE);
                }
        } else {
                /* fallback mode */
                cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
                len = min_t(u32, packet_sz, len);
        }
        cppi41_channel->prog_len = len;
        direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!dma_desc)
                return false;

        dma_desc->callback_result = cppi41_dma_callback;
        dma_desc->callback_param = channel;
        cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
        cppi41_channel->channel.rx_packet_done = false;

        trace_musb_cppi41_config(cppi41_channel);

        save_rx_toggle(cppi41_channel);
        dma_async_issue_pending(dc);
        return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
                                struct musb_hw_ep *hw_ep, u8 is_tx)
{
        struct cppi41_dma_controller *controller = container_of(c,
                        struct cppi41_dma_controller, controller);
        struct cppi41_dma_channel *cppi41_channel = NULL;
        u8 ch_num = hw_ep->epnum - 1;

        if (ch_num >= MUSB_DMA_NUM_CHANNELS)
                return NULL;

        if (is_tx)
                cppi41_channel = &controller->tx_channel[ch_num];
        else
                cppi41_channel = &controller->rx_channel[ch_num];

        if (!cppi41_channel->dc)
                return NULL;

        if (cppi41_channel->is_allocated)
                return NULL;

        cppi41_channel->hw_ep = hw_ep;
        cppi41_channel->is_allocated = 1;

        trace_musb_cppi41_alloc(cppi41_channel);
        return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;

        trace_musb_cppi41_free(cppi41_channel);
        if (cppi41_channel->is_allocated) {
                cppi41_channel->is_allocated = 0;
                channel->status = MUSB_DMA_STATUS_FREE;
                channel->actual_len = 0;
        }
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        int ret;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        int hb_mult = 0;

        BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
                channel->status == MUSB_DMA_STATUS_BUSY);

        if (is_host_active(cppi41_channel->controller->controller.musb)) {
                if (cppi41_channel->is_tx)
                        hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
                else
                        hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
        }

        channel->status = MUSB_DMA_STATUS_BUSY;
        channel->actual_len = 0;

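        /* High-bandwidth endpoints: bits 10:0 carry the base maxpacket. */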
        if (hb_mult)
                packet_sz = hb_mult * (packet_sz & 0x7FF);

        ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
        if (!ret)
                channel->status = MUSB_DMA_STATUS_FREE;

        return ret;
}

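/*
 * Gadget mode only: restrict DMA to bulk TX endpoints, since AM335x
 * Advisory 1.0.13 leaves no workaround for device RX mode.
 */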
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
                void *buf, u32 length)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;

        if (is_host_active(musb)) {
                WARN_ON(1);
                return 1;
        }
        if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
                return 0;
        if (cppi41_channel->is_tx)
                return 1;
        /* AM335x Advisory 1.0.13. No workaround for device RX mode */
        return 0;
}

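/*
 * Abort sequence: disable DMA requests at the endpoint, drain and flush the
 * FIFO, then tear the CPPI channel down (TX additionally needs the
 * USB_TDOWN trigger, repeated until dmaengine_terminate_all() stops
 * returning -EAGAIN).
 */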
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;
        void __iomem *epio = cppi41_channel->hw_ep->regs;
        int tdbit;
        int ret;
        unsigned is_tx;
        u16 csr;

        is_tx = cppi41_channel->is_tx;
        trace_musb_cppi41_abort(cppi41_channel);

        if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
                return 0;

        list_del_init(&cppi41_channel->tx_check);
        if (is_tx) {
                csr = musb_readw(epio, MUSB_TXCSR);
                csr &= ~MUSB_TXCSR_DMAENAB;
                musb_writew(epio, MUSB_TXCSR, csr);
        } else {
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

                /* delay to drain the cppi dma pipeline for isoch */
                udelay(250);

                csr = musb_readw(epio, MUSB_RXCSR);
                csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
                musb_writew(epio, MUSB_RXCSR, csr);

                /* wait to drain the cppi dma pipeline */
                udelay(50);

                csr = musb_readw(epio, MUSB_RXCSR);
                if (csr & MUSB_RXCSR_RXPKTRDY) {
                        csr |= MUSB_RXCSR_FLUSHFIFO;
                        musb_writew(epio, MUSB_RXCSR, csr);
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }

        /* DA8xx Advisory 2.3.27: wait 250 ms before starting the teardown */
        if (musb->io.quirks & MUSB_DA8XX)
                mdelay(250);

        tdbit = 1 << cppi41_channel->port_num;
        if (is_tx)
                tdbit <<= 16;

        do {
                if (is_tx)
                        musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
                ret = dmaengine_terminate_all(cppi41_channel->dc);
        } while (ret == -EAGAIN);

        if (is_tx) {
                musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

                csr = musb_readw(epio, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_TXPKTRDY) {
                        csr |= MUSB_TXCSR_FLUSHFIFO;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }
        }

        cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
        return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
        struct dma_chan *dc;
        int i;

        for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
                dc = ctrl->tx_channel[i].dc;
                if (dc)
                        dma_release_channel(dc);
                dc = ctrl->rx_channel[i].dc;
                if (dc)
                        dma_release_channel(dc);
        }
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
        cppi41_release_all_dma_chans(controller);
}

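/*
 * Bind the dmaengine channels named in the parent's DT "dma-names"
 * property; each entry is "tx<n>" or "rx<n>" with <n> the 1-based MUSB
 * endpoint/port number.
 */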
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
        struct musb *musb = controller->controller.musb;
        struct device *dev = musb->controller;
        struct device_node *np = dev->parent->of_node;
        struct cppi41_dma_channel *cppi41_channel;
        int count;
        int i;
        int ret;

        count = of_property_count_strings(np, "dma-names");
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                struct dma_chan *dc;
                struct dma_channel *musb_dma;
                const char *str;
                unsigned is_tx;
                unsigned int port;

                ret = of_property_read_string_index(np, "dma-names", i, &str);
                if (ret)
                        goto err;
                if (strstarts(str, "tx"))
                        is_tx = 1;
                else if (strstarts(str, "rx"))
                        is_tx = 0;
                else {
                        dev_err(dev, "Wrong dmatype %s\n", str);
                        goto err;
                }
                ret = kstrtouint(str + 2, 0, &port);
                if (ret)
                        goto err;

                ret = -EINVAL;
                if (port > MUSB_DMA_NUM_CHANNELS || !port)
                        goto err;
                if (is_tx)
                        cppi41_channel = &controller->tx_channel[port - 1];
                else
                        cppi41_channel = &controller->rx_channel[port - 1];

                cppi41_channel->controller = controller;
                cppi41_channel->port_num = port;
                cppi41_channel->is_tx = is_tx;
                INIT_LIST_HEAD(&cppi41_channel->tx_check);

                musb_dma = &cppi41_channel->channel;
                musb_dma->private_data = cppi41_channel;
                musb_dma->status = MUSB_DMA_STATUS_FREE;
                musb_dma->max_len = SZ_4M;

                dc = dma_request_chan(dev->parent, str);
                if (IS_ERR(dc)) {
                        ret = PTR_ERR(dc);
                        if (ret != -EPROBE_DEFER)
                                dev_err(dev, "Failed to request %s: %d.\n",
                                        str, ret);
                        goto err;
                }

                cppi41_channel->dc = dc;
        }
        return 0;
err:
        cppi41_release_all_dma_chans(controller);
        return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
        struct cppi41_dma_controller *controller = container_of(c,
                        struct cppi41_dma_controller, controller);

        hrtimer_cancel(&controller->early_tx);
        cppi41_dma_controller_stop(controller);
        kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

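/*
 * Controller constructor: returns ERR_PTR(-EPROBE_DEFER) while the DMA
 * channels are not yet available, NULL on any other failure.
 */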
struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
        struct cppi41_dma_controller *controller;
        int ret = 0;

        if (!musb->controller->parent->of_node) {
                dev_err(musb->controller, "Need DT for the DMA engine.\n");
                return NULL;
        }

        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
        if (!controller)
                goto kzalloc_fail;

        hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        controller->early_tx.function = cppi41_recheck_tx_req;
        INIT_LIST_HEAD(&controller->early_tx_list);

        controller->controller.channel_alloc = cppi41_dma_channel_allocate;
        controller->controller.channel_release = cppi41_dma_channel_release;
        controller->controller.channel_program = cppi41_dma_channel_program;
        controller->controller.channel_abort = cppi41_dma_channel_abort;
        controller->controller.is_compatible = cppi41_is_compatible;
        controller->controller.musb = musb;

        ret = cppi41_dma_controller_start(controller);
        if (ret)
                goto plat_get_fail;
        return &controller->controller;

plat_get_fail:
        kfree(controller);
kzalloc_fail:
        if (ret == -EPROBE_DEFER)
                return ERR_PTR(ret);
        return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);