linux/drivers/mmc/host/alcor.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
   4 *
   5 * Driver for Alcor Micro AU6601 and AU6621 controllers
   6 */
   7
   8/* Note: this driver was created without any documentation. It is based
   9 * on sniffing, testing and, in some cases, mimicking the original driver.
  10 * If someone with documentation, or with more experience in SD/MMC or
  11 * reverse engineering than me, reviews this driver, please question
  12 * everything I did. 2018 Oleksij Rempel <linux@rempel-privat.de>
  13 */
  14
  15#include <linux/delay.h>
  16#include <linux/pci.h>
  17#include <linux/module.h>
  18#include <linux/io.h>
  19#include <linux/pm.h>
  20#include <linux/irq.h>
  21#include <linux/interrupt.h>
  22#include <linux/platform_device.h>
  23
  24#include <linux/mmc/host.h>
  25#include <linux/mmc/mmc.h>
  26
  27#include <linux/alcor_pci.h>
  28
  29enum alcor_cookie {
  30        COOKIE_UNMAPPED,
  31        COOKIE_PRE_MAPPED,
  32        COOKIE_MAPPED,
  33};
  34
  35struct alcor_pll_conf {
  36        unsigned int clk_src_freq;
  37        unsigned int clk_src_reg;
  38        unsigned int min_div;
  39        unsigned int max_div;
  40};
  41
  42struct alcor_sdmmc_host {
  43        struct  device *dev;
  44        struct alcor_pci_priv *alcor_pci;
  45
  46        struct mmc_request *mrq;
  47        struct mmc_command *cmd;
  48        struct mmc_data *data;
  49        unsigned int dma_on:1;
  50
  51        struct mutex cmd_mutex;
  52
  53        struct delayed_work timeout_work;
  54
  55        struct sg_mapping_iter sg_miter;        /* SG state for PIO */
  56        struct scatterlist *sg;
  57        unsigned int blocks;            /* remaining PIO blocks */
  58        int sg_count;
  59
  60        u32                     irq_status_sd;
  61        unsigned char           cur_power_mode;
  62};
  63
  64static const struct alcor_pll_conf alcor_pll_cfg[] = {
  65        /* Hz,          CLK src,                min div, max div */
  66        { 31250000,     AU6601_CLK_31_25_MHZ,   1,      511},
  67        { 48000000,     AU6601_CLK_48_MHZ,      1,      511},
  68        {125000000,     AU6601_CLK_125_MHZ,     1,      511},
  69        {384000000,     AU6601_CLK_384_MHZ,     1,      511},
  70};
  71
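    /* Read-modify-write helper for 8-bit registers: clear the bits in
     * 'clear', then set the bits in 'set'.
     */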
  72static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr,
  73                               u8 clear, u8 set)
  74{
  75        struct alcor_pci_priv *priv = host->alcor_pci;
  76        u32 var;
  77
  78        var = alcor_read8(priv, addr);
  79        var &= ~clear;
  80        var |= set;
  81        alcor_write8(priv, var, addr);
  82}
  83
  84/* While irqs are masked, some status updates may be missed.
  85 * Use this with care.
  86 */
  87static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host)
  88{
  89        struct alcor_pci_priv *priv = host->alcor_pci;
  90
  91        alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
  92}
  93
  94static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host)
  95{
  96        struct alcor_pci_priv *priv = host->alcor_pci;
  97
  98        alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK |
  99                  AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE |
 100                  AU6601_INT_OVER_CURRENT_ERR,
 101                  AU6601_REG_INT_ENABLE);
 102}
 103
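    /* Reset the selected controller state machine(s) and poll (up to ~5 ms)
     * for the reset bit(s) to clear.
     */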
 104static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
 105{
 106        struct alcor_pci_priv *priv = host->alcor_pci;
 107        int i;
 108
 109        alcor_write8(priv, val | AU6601_BUF_CTRL_RESET,
 110                      AU6601_REG_SW_RESET);
 111        for (i = 0; i < 100; i++) {
 112                if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val))
 113                        return;
 114                udelay(50);
 115        }
 116        dev_err(host->dev, "%s: timeout\n", __func__);
 117}
 118
 119/*
 120 * Perform DMA I/O of a single page.
 121 */
 122static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
 123{
 124        struct alcor_pci_priv *priv = host->alcor_pci;
 125        u32 addr;
 126
 127        if (!host->sg_count)
 128                return;
 129
 130        if (!host->sg) {
 131                dev_err(host->dev, "have blocks, but no SG\n");
 132                return;
 133        }
 134
 135        if (!sg_dma_len(host->sg)) {
 136                dev_err(host->dev, "DMA SG len == 0\n");
 137                return;
 138        }
 139
 140
 141        addr = (u32)sg_dma_address(host->sg);
 142
 143        alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR);
 144        host->sg = sg_next(host->sg);
 145        host->sg_count--;
 146}
 147
 148static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
 149{
 150        struct alcor_pci_priv *priv = host->alcor_pci;
 151        struct mmc_data *data = host->data;
 152        u8 ctrl = 0;
 153
 154        if (data->flags & MMC_DATA_WRITE)
 155                ctrl |= AU6601_DATA_WRITE;
 156
 157        if (data->host_cookie == COOKIE_MAPPED) {
 158                /*
 159                 * For DMA transfers, this function is called just once,
 160                 * at the start of the operation. The hardware can only
 161                 * perform DMA I/O on a single page at a time, so here
 162                 * we kick off the transfer with the first page, and expect
 163                 * subsequent pages to be transferred upon IRQ events
 164                 * indicating that the single-page DMA was completed.
 165                 */
 166                alcor_data_set_dma(host);
 167                ctrl |= AU6601_DATA_DMA_MODE;
 168                host->dma_on = 1;
 169                alcor_write32(priv, data->sg_count * 0x1000,
 170                               AU6601_REG_BLOCK_SIZE);
 171        } else {
 172                /*
 173                 * For PIO transfers, we break down each operation
 174                 * into several sector-sized transfers. When one sector has
 175                 * completed, the IRQ handler will call this function again
 176                 * to kick off the transfer of the next sector.
 177                 */
 178                alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
 179        }
 180
 181        alcor_write8(priv, ctrl | AU6601_DATA_START_XFER,
 182                      AU6601_DATA_XFER_CTRL);
 183}
 184
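    /* PIO transfer of a single block between the data buffer register and
     * the current sg_miter mapping, 32 bits at a time.
     */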
 185static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
 186{
 187        struct alcor_pci_priv *priv = host->alcor_pci;
 188        size_t blksize, len;
 189        u8 *buf;
 190
 191        if (!host->blocks)
 192                return;
 193
 194        if (host->dma_on) {
 195                dev_err(host->dev, "configured DMA but got PIO request.\n");
 196                return;
 197        }
 198
 199        if (!!(host->data->flags & MMC_DATA_READ) != read) {
 200                dev_err(host->dev, "got unexpected direction %i != %i\n",
 201                        !!(host->data->flags & MMC_DATA_READ), read);
 202        }
 203
 204        if (!sg_miter_next(&host->sg_miter))
 205                return;
 206
 207        blksize = host->data->blksz;
 208        len = min(host->sg_miter.length, blksize);
 209
 210        dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
 211                read ? "read" : "write", blksize);
 212
 213        host->sg_miter.consumed = len;
 214        host->blocks--;
 215
 216        buf = host->sg_miter.addr;
 217
 218        if (read)
 219                ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
 220        else
 221                iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
 222
 223        sg_miter_stop(&host->sg_miter);
 224}
 225
 226static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
 227{
 228        unsigned int flags = SG_MITER_ATOMIC;
 229        struct mmc_data *data = host->data;
 230
 231        if (data->flags & MMC_DATA_READ)
 232                flags |= SG_MITER_TO_SG;
 233        else
 234                flags |= SG_MITER_FROM_SG;
 235        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 236}
 237
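    /* Stash the data portion of a command in the host state and set up the
     * sg_miter for PIO unless the request was pre-mapped for DMA.
     */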
 238static void alcor_prepare_data(struct alcor_sdmmc_host *host,
 239                               struct mmc_command *cmd)
 240{
 241        struct alcor_pci_priv *priv = host->alcor_pci;
 242        struct mmc_data *data = cmd->data;
 243
 244        if (!data)
 245                return;
 246
 247
 248        host->data = data;
 249        host->data->bytes_xfered = 0;
 250        host->blocks = data->blocks;
 251        host->sg = data->sg;
 252        host->sg_count = data->sg_count;
 253        dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n",
 254                        host->sg_count, host->blocks);
 255
 256        if (data->host_cookie != COOKIE_MAPPED)
 257                alcor_prepare_sg_miter(host);
 258
 259        alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
 260}
 261
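    /* Prepare any attached data, program opcode, argument and expected
     * response type, optionally arm the software timeout, then start the
     * command transfer.
     */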
 262static void alcor_send_cmd(struct alcor_sdmmc_host *host,
 263                           struct mmc_command *cmd, bool set_timeout)
 264{
 265        struct alcor_pci_priv *priv = host->alcor_pci;
 266        unsigned long timeout = 0;
 267        u8 ctrl = 0;
 268
 269        host->cmd = cmd;
 270        alcor_prepare_data(host, cmd);
 271
 272        dev_dbg(host->dev, "send CMD. opcode: 0x%02x, arg: 0x%08x\n",
 273                cmd->opcode, cmd->arg);
 274        alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE);
 275        alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG);
 276
 277        switch (mmc_resp_type(cmd)) {
 278        case MMC_RSP_NONE:
 279                ctrl = AU6601_CMD_NO_RESP;
 280                break;
 281        case MMC_RSP_R1:
 282                ctrl = AU6601_CMD_6_BYTE_CRC;
 283                break;
 284        case MMC_RSP_R1B:
 285                ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY;
 286                break;
 287        case MMC_RSP_R2:
 288                ctrl = AU6601_CMD_17_BYTE_CRC;
 289                break;
 290        case MMC_RSP_R3:
 291                ctrl = AU6601_CMD_6_BYTE_WO_CRC;
 292                break;
 293        default:
 294                dev_err(host->dev, "%s: cmd->flags (0x%02x) is not valid\n",
 295                        mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
 296                break;
 297        }
 298
 299        if (set_timeout) {
 300                if (!cmd->data && cmd->busy_timeout)
 301                        timeout = cmd->busy_timeout;
 302                else
 303                        timeout = 10000;
 304
 305                schedule_delayed_work(&host->timeout_work,
 306                                      msecs_to_jiffies(timeout));
 307        }
 308
 309        dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout);
 310        alcor_write8(priv, ctrl | AU6601_CMD_START_XFER,
 311                                 AU6601_CMD_XFER_CTRL);
 312}
 313
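    /* Tear down the current request state, optionally cancelling the
     * software timeout, and report completion to the MMC core.
     */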
 314static void alcor_request_complete(struct alcor_sdmmc_host *host,
 315                                   bool cancel_timeout)
 316{
 317        struct mmc_request *mrq;
 318
 319        /*
 320         * If this work gets rescheduled while running, it will
 321         * be run again afterwards but without any active request.
 322         */
 323        if (!host->mrq)
 324                return;
 325
 326        if (cancel_timeout)
 327                cancel_delayed_work(&host->timeout_work);
 328
 329        mrq = host->mrq;
 330
 331        host->mrq = NULL;
 332        host->cmd = NULL;
 333        host->data = NULL;
 334        host->dma_on = 0;
 335
 336        mmc_request_done(mmc_from_priv(host), mrq);
 337}
 338
 339static void alcor_finish_data(struct alcor_sdmmc_host *host)
 340{
 341        struct mmc_data *data;
 342
 343        data = host->data;
 344        host->data = NULL;
 345        host->dma_on = 0;
 346
 347        /*
 348         * The specification states that the block count register must
 349         * be updated, but it does not specify at what point in the
 350         * data flow. That makes the register entirely useless to read
 351         * back so we have to assume that nothing made it to the card
 352         * in the event of an error.
 353         */
 354        if (data->error)
 355                data->bytes_xfered = 0;
 356        else
 357                data->bytes_xfered = data->blksz * data->blocks;
 358
 359        /*
 360         * Need to send CMD12 if -
 361         * a) open-ended multiblock transfer (no CMD23)
 362         * b) error in multiblock transfer
 363         */
 364        if (data->stop &&
 365            (data->error ||
 366             !host->mrq->sbc)) {
 367
 368                /*
 369                 * The controller needs a reset of internal state machines
 370                 * upon error conditions.
 371                 */
 372                if (data->error)
 373                        alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
 374
 375                alcor_unmask_sd_irqs(host);
 376                alcor_send_cmd(host, data->stop, false);
 377                return;
 378        }
 379
 380        alcor_request_complete(host, 1);
 381}
 382
 383static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask)
 384{
 385        dev_dbg(host->dev, "ERR IRQ %x\n", intmask);
 386
 387        if (host->cmd) {
 388                if (intmask & AU6601_INT_CMD_TIMEOUT_ERR)
 389                        host->cmd->error = -ETIMEDOUT;
 390                else
 391                        host->cmd->error = -EILSEQ;
 392        }
 393
 394        if (host->data) {
 395                if (intmask & AU6601_INT_DATA_TIMEOUT_ERR)
 396                        host->data->error = -ETIMEDOUT;
 397                else
 398                        host->data->error = -EILSEQ;
 399
 400                host->data->bytes_xfered = 0;
 401        }
 402
 403        alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
 404        alcor_request_complete(host, 1);
 405}
 406
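    /* Hard-IRQ handling of CMD_END: read the response and kick off the data
     * phase, if any. Returns false when the threaded handler still has work
     * to do.
     */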
 407static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
 408{
 409        struct alcor_pci_priv *priv = host->alcor_pci;
 410
 411        intmask &= AU6601_INT_CMD_END;
 412
 413        if (!intmask)
 414                return true;
 415
 416        /* got CMD_END but no CMD is in progress; wake the thread and
 417         * process the error
 418         */
 419        if (!host->cmd)
 420                return false;
 421
 422        if (host->cmd->flags & MMC_RSP_PRESENT) {
 423                struct mmc_command *cmd = host->cmd;
 424
 425                cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0);
 426                dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]);
 427                if (host->cmd->flags & MMC_RSP_136) {
 428                        cmd->resp[1] =
 429                                alcor_read32be(priv, AU6601_REG_CMD_RSP1);
 430                        cmd->resp[2] =
 431                                alcor_read32be(priv, AU6601_REG_CMD_RSP2);
 432                        cmd->resp[3] =
 433                                alcor_read32be(priv, AU6601_REG_CMD_RSP3);
 434                        dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n",
 435                                cmd->resp[1], cmd->resp[2], cmd->resp[3]);
 436                }
 437
 438        }
 439
 440        host->cmd->error = 0;
 441
 442        /* Processed actual command. */
 443        if (!host->data)
 444                return false;
 445
 446        alcor_trigger_data_transfer(host);
 447        host->cmd = NULL;
 448        return true;
 449}
 450
 451static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
 452{
 453        intmask &= AU6601_INT_CMD_END;
 454
 455        if (!intmask)
 456                return;
 457
 458        if (!host->cmd && intmask & AU6601_INT_CMD_END) {
 459                dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n",
 460                        intmask);
 461        }
 462
 463        /* Processed actual command. */
 464        if (!host->data)
 465                alcor_request_complete(host, 1);
 466        else
 467                alcor_trigger_data_transfer(host);
 468        host->cmd = NULL;
 469}
 470
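    /* Hard-IRQ handling of data interrupts: feed the next PIO block or DMA
     * page. Returns 0 when the threaded handler must finish the transfer.
     */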
 471static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
 472{
 473        u32 tmp;
 474
 475        intmask &= AU6601_INT_DATA_MASK;
 476
 477        /* nothing to do here */
 478        if (!intmask)
 479                return 1;
 480
 481        /* were we too fast and got DATA_END after it was processed?
 482         * let's ignore it for now.
 483         */
 484        if (!host->data && intmask == AU6601_INT_DATA_END)
 485                return 1;
 486
 487        /* looks like an error, so let's handle it. */
 488        if (!host->data)
 489                return 0;
 490
 491        tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
 492                         | AU6601_INT_DMA_END);
 493        switch (tmp) {
 494        case 0:
 495                break;
 496        case AU6601_INT_READ_BUF_RDY:
 497                alcor_trf_block_pio(host, true);
 498                return 1;
 499        case AU6601_INT_WRITE_BUF_RDY:
 500                alcor_trf_block_pio(host, false);
 501                return 1;
 502        case AU6601_INT_DMA_END:
 503                if (!host->sg_count)
 504                        break;
 505
 506                alcor_data_set_dma(host);
 507                break;
 508        default:
 509                dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at the same time\n");
 510                break;
 511        }
 512
 513        if (intmask & AU6601_INT_DATA_END) {
 514                if (!host->dma_on && host->blocks) {
 515                        alcor_trigger_data_transfer(host);
 516                        return 1;
 517                } else {
 518                        return 0;
 519                }
 520        }
 521
 522        return 1;
 523}
 524
 525static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
 526{
 527        intmask &= AU6601_INT_DATA_MASK;
 528
 529        if (!intmask)
 530                return;
 531
 532        if (!host->data) {
 533                dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n",
 534                        intmask);
 535                alcor_reset(host, AU6601_RESET_DATA);
 536                return;
 537        }
 538
 539        if (alcor_data_irq_done(host, intmask))
 540                return;
 541
 542        if ((intmask & AU6601_INT_DATA_END) || !host->blocks ||
 543            (host->dma_on && !host->sg_count))
 544                alcor_finish_data(host);
 545}
 546
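    /* Card insert/remove: fail any request in flight with -ENOMEDIUM and
     * schedule a card rescan.
     */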
 547static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
 548{
 549        dev_dbg(host->dev, "card %s\n",
 550                intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted");
 551
 552        if (host->mrq) {
 553                dev_dbg(host->dev, "cancel all pending tasks.\n");
 554
 555                if (host->data)
 556                        host->data->error = -ENOMEDIUM;
 557
 558                if (host->cmd)
 559                        host->cmd->error = -ENOMEDIUM;
 560                else
 561                        host->mrq->cmd->error = -ENOMEDIUM;
 562
 563                alcor_request_complete(host, 1);
 564        }
 565
 566        mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
 567}
 568
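    /* Threaded IRQ handler: process the status saved by alcor_irq() under
     * cmd_mutex (errors, command/data completion, card detect, over-current)
     * and re-enable the SD interrupts.
     */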
 569static irqreturn_t alcor_irq_thread(int irq, void *d)
 570{
 571        struct alcor_sdmmc_host *host = d;
 572        irqreturn_t ret = IRQ_HANDLED;
 573        u32 intmask, tmp;
 574
 575        mutex_lock(&host->cmd_mutex);
 576
 577        intmask = host->irq_status_sd;
 578
 579        /* something bad happened */
 580        if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) {
 581                dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask);
 582                ret = IRQ_NONE;
 583                goto exit;
 584        }
 585
 586        tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
 587        if (tmp) {
 588                if (tmp & AU6601_INT_ERROR_MASK)
 589                        alcor_err_irq(host, tmp);
 590                else {
 591                        alcor_cmd_irq_thread(host, tmp);
 592                        alcor_data_irq_thread(host, tmp);
 593                }
 594                intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
 595        }
 596
 597        if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) {
 598                alcor_cd_irq(host, intmask);
 599                intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE);
 600        }
 601
 602        if (intmask & AU6601_INT_OVER_CURRENT_ERR) {
 603                dev_warn(host->dev,
 604                         "warning: over current detected!\n");
 605                intmask &= ~AU6601_INT_OVER_CURRENT_ERR;
 606        }
 607
 608        if (intmask)
 609                dev_dbg(host->dev, "got unhandled IRQ: 0x%04x\n", intmask);
 610
 611exit:
 612        mutex_unlock(&host->cmd_mutex);
 613        alcor_unmask_sd_irqs(host);
 614        return ret;
 615}
 616
 617
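    /* Hard IRQ handler: ack the interrupt status, handle simple command/data
     * progress in place, otherwise mask the SD interrupts and wake the
     * threaded handler.
     */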
 618static irqreturn_t alcor_irq(int irq, void *d)
 619{
 620        struct alcor_sdmmc_host *host = d;
 621        struct alcor_pci_priv *priv = host->alcor_pci;
 622        u32 status, tmp;
 623        irqreturn_t ret;
 624        int cmd_done, data_done;
 625
 626        status = alcor_read32(priv, AU6601_REG_INT_STATUS);
 627        if (!status)
 628                return IRQ_NONE;
 629
 630        alcor_write32(priv, status, AU6601_REG_INT_STATUS);
 631
 632        tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
 633                        | AU6601_INT_DATA_END | AU6601_INT_DMA_END
 634                        | AU6601_INT_CMD_END);
 635        if (tmp == status) {
 636                cmd_done = alcor_cmd_irq_done(host, tmp);
 637                data_done = alcor_data_irq_done(host, tmp);
 638                /* use fast path for simple tasks */
 639                if (cmd_done && data_done) {
 640                        ret = IRQ_HANDLED;
 641                        goto alcor_irq_done;
 642                }
 643        }
 644
 645        host->irq_status_sd = status;
 646        ret = IRQ_WAKE_THREAD;
 647        alcor_mask_sd_irqs(host);
 648alcor_irq_done:
 649        return ret;
 650}
 651
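    /* Pick the PLL source and divider that get closest to the requested
     * clock and program AU6601_CLK_SELECT; a clock of 0 disables the clock.
     */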
 652static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock)
 653{
 654        struct alcor_pci_priv *priv = host->alcor_pci;
 655        int i, diff = 0x7fffffff, tmp_clock = 0;
 656        u16 clk_src = 0;
 657        u8 clk_div = 0;
 658
 659        if (clock == 0) {
 660                alcor_write16(priv, 0, AU6601_CLK_SELECT);
 661                return;
 662        }
 663
 664        for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) {
 665                unsigned int tmp_div, tmp_diff;
 666                const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i];
 667
 668                tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock);
 669                if (cfg->min_div > tmp_div || tmp_div > cfg->max_div)
 670                        continue;
 671
 672                tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div);
 673                tmp_diff = abs(clock - tmp_clock);
 674
 675                if (tmp_diff < diff) {
 676                        diff = tmp_diff;
 677                        clk_src = cfg->clk_src_reg;
 678                        clk_div = tmp_div;
 679                }
 680        }
 681
 682        clk_src |= ((clk_div - 1) << 8);
 683        clk_src |= AU6601_CLK_ENABLE;
 684
 685        dev_dbg(host->dev, "set freq %d cal freq %d, use div %d, mod %x\n",
 686                        clock, tmp_clock, clk_div, clk_src);
 687
 688        alcor_write16(priv, clk_src, AU6601_CLK_SELECT);
 689
 690}
 691
 692static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios)
 693{
 694        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 695
 696        if (ios->timing == MMC_TIMING_LEGACY) {
 697                alcor_rmw8(host, AU6601_CLK_DELAY,
 698                            AU6601_CLK_POSITIVE_EDGE_ALL, 0);
 699        } else {
 700                alcor_rmw8(host, AU6601_CLK_DELAY,
 701                            0, AU6601_CLK_POSITIVE_EDGE_ALL);
 702        }
 703}
 704
 705static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios)
 706{
 707        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 708        struct alcor_pci_priv *priv = host->alcor_pci;
 709
 710        if (ios->bus_width == MMC_BUS_WIDTH_1) {
 711                alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
 712        } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
 713                alcor_write8(priv, AU6601_BUS_WIDTH_4BIT,
 714                              AU6601_REG_BUS_CTRL);
 715        } else
 716                dev_err(host->dev, "Unknown BUS mode\n");
 717
 718}
 719
 720static int alcor_card_busy(struct mmc_host *mmc)
 721{
 722        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 723        struct alcor_pci_priv *priv = host->alcor_pci;
 724        u8 status;
 725
 726        /* Check whether dat[0:3] are low */
 727        status = alcor_read8(priv, AU6601_DATA_PIN_STATE);
 728
 729        return !(status & AU6601_BUS_STAT_DAT_MASK);
 730}
 731
 732static int alcor_get_cd(struct mmc_host *mmc)
 733{
 734        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 735        struct alcor_pci_priv *priv = host->alcor_pci;
 736        u8 detect;
 737
 738        detect = alcor_read8(priv, AU6601_DETECT_STATUS)
 739                & AU6601_DETECT_STATUS_M;
 740        /* check if a card is present; only then can we send command and data */
 741        return (detect == AU6601_SD_DETECTED);
 742}
 743
 744static int alcor_get_ro(struct mmc_host *mmc)
 745{
 746        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 747        struct alcor_pci_priv *priv = host->alcor_pci;
 748        u8 status;
 749
 750        /* get write protect pin status */
 751        status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL);
 752
 753        return !!(status & AU6601_SD_CARD_WP);
 754}
 755
 756static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq)
 757{
 758        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 759
 760        mutex_lock(&host->cmd_mutex);
 761
 762        host->mrq = mrq;
 763
 764        /* if a card is present, send the command and data */
 765        if (alcor_get_cd(mmc))
 766                alcor_send_cmd(host, mrq->cmd, true);
 767        else {
 768                mrq->cmd->error = -ENOMEDIUM;
 769                alcor_request_complete(host, 1);
 770        }
 771
 772        mutex_unlock(&host->cmd_mutex);
 773}
 774
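    /* Pre-map the request for DMA when it qualifies; requests that do not
     * qualify stay COOKIE_UNMAPPED and are handled with PIO.
     */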
 775static void alcor_pre_req(struct mmc_host *mmc,
 776                           struct mmc_request *mrq)
 777{
 778        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 779        struct mmc_data *data = mrq->data;
 780        struct mmc_command *cmd = mrq->cmd;
 781        struct scatterlist *sg;
 782        unsigned int i, sg_len;
 783
 784        if (!data || !cmd)
 785                return;
 786
 787        data->host_cookie = COOKIE_UNMAPPED;
 788
 789        /* FIXME: looks like the DMA engine works only with CMD18 */
 790        if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
 791                        && cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
 792                return;
 793        /*
 794         * We don't do DMA on "complex" transfers, i.e. with
 795         * non-word-aligned buffers or lengths. A future improvement
 796         * could be made to use temporary DMA bounce-buffers when these
 797         * requirements are not met.
 798         *
 799         * Also, we don't bother with all the DMA setup overhead for
 800         * short transfers.
 801         */
 802        if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
 803                return;
 804
 805        if (data->blksz & 3)
 806                return;
 807
 808        for_each_sg(data->sg, sg, data->sg_len, i) {
 809                if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
 810                        return;
 811                if (sg->offset != 0)
 812                        return;
 813        }
 814
 815        /* This data might be unmapped at this time */
 816
 817        sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
 818                            mmc_get_dma_dir(data));
 819        if (sg_len)
 820                data->host_cookie = COOKIE_MAPPED;
 821
 822        data->sg_count = sg_len;
 823}
 824
 825static void alcor_post_req(struct mmc_host *mmc,
 826                            struct mmc_request *mrq,
 827                            int err)
 828{
 829        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 830        struct mmc_data *data = mrq->data;
 831
 832        if (!data)
 833                return;
 834
 835        if (data->host_cookie == COOKIE_MAPPED) {
 836                dma_unmap_sg(host->dev,
 837                             data->sg,
 838                             data->sg_len,
 839                             mmc_get_dma_dir(data));
 840        }
 841
 842        data->host_cookie = COOKIE_UNMAPPED;
 843}
 844
 845static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios)
 846{
 847        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 848        struct alcor_pci_priv *priv = host->alcor_pci;
 849
 850        switch (ios->power_mode) {
 851        case MMC_POWER_OFF:
 852                alcor_set_clock(host, ios->clock);
 853                /* set all pins to input */
 854                alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
 855                /* turn off VDD */
 856                alcor_write8(priv, 0, AU6601_POWER_CONTROL);
 857                break;
 858        case MMC_POWER_UP:
 859                break;
 860        case MMC_POWER_ON:
 861                /* This is the trickiest part. The order and timing of these
 862                 * instructions seems to play an important role. Any changes
 863                 * may confuse the internal state machine of this HW.
 864                 * FIXME: if we ever get access to documentation, this part
 865                 * should be reviewed again.
 866                 */
 867
 868                /* enable SD card mode */
 869                alcor_write8(priv, AU6601_SD_CARD,
 870                              AU6601_ACTIVE_CTRL);
 871                /* set signal voltage to 3.3V */
 872                alcor_write8(priv, 0, AU6601_OPT);
 873                /* no documentation about clk delay; for now just try to
 874                 * mimic the original driver.
 875                 */
 876                alcor_write8(priv, 0x20, AU6601_CLK_DELAY);
 877                /* set BUS width to 1 bit */
 878                alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
 879                /* set CLK first time */
 880                alcor_set_clock(host, ios->clock);
 881                /* power on VDD */
 882                alcor_write8(priv, AU6601_SD_CARD,
 883                              AU6601_POWER_CONTROL);
 884                /* wait until the CLK gets stable */
 885                mdelay(20);
 886                /* set CLK again, mimicking the original driver. */
 887                alcor_set_clock(host, ios->clock);
 888
 889                /* enable output */
 890                alcor_write8(priv, AU6601_SD_CARD,
 891                              AU6601_OUTPUT_ENABLE);
 892                /* The clk will not work on au6621 unless we trigger a
 893                 * data transfer.
 894                 */
 895                alcor_write8(priv, AU6601_DATA_WRITE,
 896                              AU6601_DATA_XFER_CTRL);
 897                /* configure timeout. Not clear what exactly it means. */
 898                alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL);
 899                mdelay(100);
 900                break;
 901        default:
 902                dev_err(host->dev, "Unknown power parameter\n");
 903        }
 904}
 905
 906static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 907{
 908        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 909
 910        mutex_lock(&host->cmd_mutex);
 911
 912        dev_dbg(host->dev, "set ios. bus width: %x, power mode: %x\n",
 913                ios->bus_width, ios->power_mode);
 914
 915        if (ios->power_mode != host->cur_power_mode) {
 916                alcor_set_power_mode(mmc, ios);
 917                host->cur_power_mode = ios->power_mode;
 918        } else {
 919                alcor_set_timing(mmc, ios);
 920                alcor_set_bus_width(mmc, ios);
 921                alcor_set_clock(host, ios->clock);
 922        }
 923
 924        mutex_unlock(&host->cmd_mutex);
 925}
 926
 927static int alcor_signal_voltage_switch(struct mmc_host *mmc,
 928                                       struct mmc_ios *ios)
 929{
 930        struct alcor_sdmmc_host *host = mmc_priv(mmc);
 931
 932        mutex_lock(&host->cmd_mutex);
 933
 934        switch (ios->signal_voltage) {
 935        case MMC_SIGNAL_VOLTAGE_330:
 936                alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0);
 937                break;
 938        case MMC_SIGNAL_VOLTAGE_180:
 939                alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V);
 940                break;
 941        default:
 942                /* No signal voltage switch required */
 943                break;
 944        }
 945
 946        mutex_unlock(&host->cmd_mutex);
 947        return 0;
 948}
 949
 950static const struct mmc_host_ops alcor_sdc_ops = {
 951        .card_busy      = alcor_card_busy,
 952        .get_cd         = alcor_get_cd,
 953        .get_ro         = alcor_get_ro,
 954        .post_req       = alcor_post_req,
 955        .pre_req        = alcor_pre_req,
 956        .request        = alcor_request,
 957        .set_ios        = alcor_set_ios,
 958        .start_signal_voltage_switch = alcor_signal_voltage_switch,
 959};
 960
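    /* Software timeout (delayed work): the hardware never signalled
     * completion, so fail the pending request and reset the controller.
     */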
 961static void alcor_timeout_timer(struct work_struct *work)
 962{
 963        struct delayed_work *d = to_delayed_work(work);
 964        struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host,
 965                                                timeout_work);
 966        mutex_lock(&host->cmd_mutex);
 967
 968        dev_dbg(host->dev, "triggered timeout\n");
 969        if (host->mrq) {
 970                dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
 971
 972                if (host->data) {
 973                        host->data->error = -ETIMEDOUT;
 974                } else {
 975                        if (host->cmd)
 976                                host->cmd->error = -ETIMEDOUT;
 977                        else
 978                                host->mrq->cmd->error = -ETIMEDOUT;
 979                }
 980
 981                alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
 982                alcor_request_complete(host, 0);
 983        }
 984
 985        mutex_unlock(&host->cmd_mutex);
 986}
 987
 988static void alcor_hw_init(struct alcor_sdmmc_host *host)
 989{
 990        struct alcor_pci_priv *priv = host->alcor_pci;
 991        struct alcor_dev_cfg *cfg = priv->cfg;
 992
 993        /* FIXME: this part mimics the HW init of the original driver.
 994         * If we ever get access to documentation, this part should be
 995         * reviewed again.
 996         */
 997
 998        /* reset command state engine */
 999        alcor_reset(host, AU6601_RESET_CMD);
1000
1001        alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
1002        /* enable sd card mode */
1003        alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL);
1004
1005        /* set BUS width to 1 bit */
1006        alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
1007
1008        /* reset data state engine */
1009        alcor_reset(host, AU6601_RESET_DATA);
1010        /* Not sure if this voodoo with AU6601_DMA_BOUNDARY is really needed */
1011        alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
1012
1013        alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL);
1014        /* not clear what we are doing here. */
1015        alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0);
1016        alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1);
1017        alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2);
1018
1019        /* for the 6601 this is dma_boundary; for the 6621 it is
1020         * dma_page_cnt. The exact meaning of this register is not clear.
1021         */
1022        alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY);
1023
1024        /* make sure all pins are set to input and VDD is off */
1025        alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
1026        alcor_write8(priv, 0, AU6601_POWER_CONTROL);
1027
1028        alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS);
1029        /* now we should be safe to enable IRQs */
1030        alcor_unmask_sd_irqs(host);
1031}
1032
1033static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
1034{
1035        struct alcor_pci_priv *priv = host->alcor_pci;
1036
1037        alcor_mask_sd_irqs(host);
1038        alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
1039
1040        alcor_write8(priv, 0, AU6601_DETECT_STATUS);
1041
1042        alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
1043        alcor_write8(priv, 0, AU6601_POWER_CONTROL);
1044
1045        alcor_write8(priv, 0, AU6601_OPT);
1046}
1047
1048static void alcor_init_mmc(struct alcor_sdmmc_host *host)
1049{
1050        struct mmc_host *mmc = mmc_from_priv(host);
1051
1052        mmc->f_min = AU6601_MIN_CLOCK;
1053        mmc->f_max = AU6601_MAX_CLOCK;
1054        mmc->ocr_avail = MMC_VDD_33_34;
1055        mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED
1056                | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
1057                | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50;
1058        mmc->caps2 = MMC_CAP2_NO_SDIO;
1059        mmc->ops = &alcor_sdc_ops;
1060
1061        /* The hardware does DMA data transfer of 4096 bytes to/from a single
1062         * buffer address. Scatterlists are not supported at the hardware
1063         * level, however we can work with them at the driver level,
1064         * provided that each segment is exactly 4096 bytes in size.
1065         * Upon DMA completion of a single segment (signalled via IRQ), we
1066         * immediately proceed to transfer the next segment from the
1067         * scatterlist.
1068         *
1069         * The overall request is limited to 240 sectors, matching the
1070         * original vendor driver.
1071         */
1072        mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
1073        mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
1074        mmc->max_blk_count = 240;
1075        mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
1076        dma_set_max_seg_size(host->dev, mmc->max_seg_size);
1077}
1078
1079static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
1080{
1081        struct alcor_pci_priv *priv = pdev->dev.platform_data;
1082        struct mmc_host *mmc;
1083        struct alcor_sdmmc_host *host;
1084        int ret;
1085
1086        mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1087        if (!mmc) {
1088                dev_err(&pdev->dev, "Can't allocate MMC\n");
1089                return -ENOMEM;
1090        }
1091
1092        host = mmc_priv(mmc);
1093        host->dev = &pdev->dev;
1094        host->cur_power_mode = MMC_POWER_UNDEFINED;
1095        host->alcor_pci = priv;
1096
1097        /* make sure irqs are disabled */
1098        alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
1099        alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);
1100
1101        ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
1102                        alcor_irq, alcor_irq_thread, IRQF_SHARED,
1103                        DRV_NAME_ALCOR_PCI_SDMMC, host);
1104
1105        if (ret) {
1106                dev_err(&pdev->dev, "Failed to get irq for data line\n");
1107                goto free_host;
1108        }
1109
1110        mutex_init(&host->cmd_mutex);
1111        INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);
1112
1113        alcor_init_mmc(host);
1114        alcor_hw_init(host);
1115
1116        dev_set_drvdata(&pdev->dev, host);
1117        mmc_add_host(mmc);
1118        return 0;
1119
1120free_host:
1121        mmc_free_host(mmc);
1122        return ret;
1123}
1124
1125static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
1126{
1127        struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
1128        struct mmc_host *mmc = mmc_from_priv(host);
1129
1130        if (cancel_delayed_work_sync(&host->timeout_work))
1131                alcor_request_complete(host, 0);
1132
1133        alcor_hw_uninit(host);
1134        mmc_remove_host(mmc);
1135        mmc_free_host(mmc);
1136
1137        return 0;
1138}
1139
1140#ifdef CONFIG_PM_SLEEP
1141static int alcor_pci_sdmmc_suspend(struct device *dev)
1142{
1143        struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
1144
1145        if (cancel_delayed_work_sync(&host->timeout_work))
1146                alcor_request_complete(host, 0);
1147
1148        alcor_hw_uninit(host);
1149
1150        return 0;
1151}
1152
1153static int alcor_pci_sdmmc_resume(struct device *dev)
1154{
1155        struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
1156
1157        alcor_hw_init(host);
1158
1159        return 0;
1160}
1161#endif /* CONFIG_PM_SLEEP */
1162
1163static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
1164                         alcor_pci_sdmmc_resume);
1165
1166static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
1167        {
1168                .name = DRV_NAME_ALCOR_PCI_SDMMC,
1169        }, {
1170                /* sentinel */
1171        }
1172};
1173MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);
1174
1175static struct platform_driver alcor_pci_sdmmc_driver = {
1176        .probe          = alcor_pci_sdmmc_drv_probe,
1177        .remove         = alcor_pci_sdmmc_drv_remove,
1178        .id_table       = alcor_pci_sdmmc_ids,
1179        .driver         = {
1180                .name   = DRV_NAME_ALCOR_PCI_SDMMC,
1181                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1182                .pm     = &alcor_mmc_pm_ops
1183        },
1184};
1185module_platform_driver(alcor_pci_sdmmc_driver);
1186
1187MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
1188MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
1189MODULE_LICENSE("GPL");
1190