/* linux/drivers/mmc/host/cavium-octeon.c */
/*
 * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <asm/octeon/octeon.h>
#include "cavium.h"

#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)

/*
 * The l2c* functions below are used for the EMMC-17978 workaround.
 *
 * Due to a bug in the design of the MMC bus hardware, the 2nd to last
 * cache block of a DMA read must be locked into the L2 Cache.
 * Otherwise, data corruption may occur.
 */
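/*
 * Form a directly dereferenceable pointer for a physical address by
 * setting bit 63, which selects the MIPS64 XKPHYS unmapped segment, so
 * no TLB mapping is needed for the cache-op accesses below.
 */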
static inline void *phys_to_ptr(u64 address)
{
        return (void *)(address | (1ull << 63)); /* XKPHYS */
}

/*
 * Lock a single line into L2. The line is zeroed before locking
 * to make sure no DRAM accesses are made.
 */
static void l2c_lock_line(u64 addr)
{
        char *addr_ptr = phys_to_ptr(addr);

        asm volatile (
                "cache 31, %[line]"     /* Lock the line */
                ::[line] "m" (*addr_ptr));
}

/* Unlock a single line in the L2 cache. */
static void l2c_unlock_line(u64 addr)
{
        char *addr_ptr = phys_to_ptr(addr);

        asm volatile (
                "cache 23, %[line]"     /* Unlock the line */
                ::[line] "m" (*addr_ptr));
}

/* Locks a memory region in the L2 cache. */
static void l2c_lock_mem_region(u64 start, u64 len)
{
        u64 end;

        /* Round start/end to cache line boundaries */
        end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
        start = ALIGN(start, CVMX_CACHE_LINE_SIZE);

        while (start <= end) {
                l2c_lock_line(start);
                start += CVMX_CACHE_LINE_SIZE;
        }
        asm volatile("sync");
}

/* Unlock a memory region in the L2 cache. */
static void l2c_unlock_mem_region(u64 start, u64 len)
{
        u64 end;

        /* Round start/end to cache line boundaries */
        end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
        start = ALIGN(start, CVMX_CACHE_LINE_SIZE);

        while (start <= end) {
                l2c_unlock_line(start);
                start += CVMX_CACHE_LINE_SIZE;
        }
}

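/*
 * On chips without a CIU3 the MMC controller shares the boot bus, so
 * bus ownership is serialized through the global octeon_bootbus_sem;
 * CIU3-based chips only need the host-local semaphore.
 */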
static void octeon_mmc_acquire_bus(struct cvm_mmc_host *host)
{
        if (!host->has_ciu3) {
                down(&octeon_bootbus_sem);
                /* For CN70XX, switch the MMC controller onto the bus. */
                if (OCTEON_IS_MODEL(OCTEON_CN70XX))
                        writeq(0, (void __iomem *)CVMX_MIO_BOOT_CTL);
        } else {
                down(&host->mmc_serializer);
        }
}

static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
{
        if (!host->has_ciu3)
                up(&octeon_bootbus_sem);
        else
                up(&host->mmc_serializer);
}

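/*
 * Clear the given interrupt bits in MIO_EMM_INT and, on chips without a
 * CIU3, also write the mask to the separate MIO_EMM_INT_EN register that
 * enables them.
 */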
static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
        writeq(val, host->base + MIO_EMM_INT(host));
        if (!host->has_ciu3)
                writeq(val, host->base + MIO_EMM_INT_EN(host));
}

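/*
 * The power GPIO may be shared by several slots, so it is reference
 * counted: it is driven high for the first user (dir == 1) and driven
 * low again only when the last user drops it (dir == 0).
 */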
static void octeon_mmc_set_shared_power(struct cvm_mmc_host *host, int dir)
{
        if (dir == 0)
                if (!atomic_dec_return(&host->shared_power_users))
                        gpiod_set_value_cansleep(host->global_pwr_gpiod, 0);
        if (dir == 1)
                if (atomic_inc_return(&host->shared_power_users) == 1)
                        gpiod_set_value_cansleep(host->global_pwr_gpiod, 1);
}

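/*
 * EMMC-17978 workaround (see the l2c_* helpers above): for multi-block
 * writes larger than 1024 bytes, lock the 512 bytes starting 1024 bytes
 * before the end of the DMA buffer into L2 until the transfer completes.
 */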
static void octeon_mmc_dmar_fixup(struct cvm_mmc_host *host,
                                  struct mmc_command *cmd,
                                  struct mmc_data *data,
                                  u64 addr)
{
        if (cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
                return;
        if (data->blksz * data->blocks <= 1024)
                return;

        host->n_minus_one = addr + (data->blksz * data->blocks) - 1024;
        l2c_lock_mem_region(host->n_minus_one, 512);
}

static void octeon_mmc_dmar_fixup_done(struct cvm_mmc_host *host)
{
        if (!host->n_minus_one)
                return;
        l2c_unlock_mem_region(host->n_minus_one, 512);
        host->n_minus_one = 0;
}

static int octeon_mmc_probe(struct platform_device *pdev)
{
        struct device_node *cn, *node = pdev->dev.of_node;
        struct cvm_mmc_host *host;
        struct resource *res;
        void __iomem *base;
        int mmc_irq[9];
        int i, ret = 0;
        u64 val;

        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        spin_lock_init(&host->irq_handler_lock);
        sema_init(&host->mmc_serializer, 1);

        host->dev = &pdev->dev;
        host->acquire_bus = octeon_mmc_acquire_bus;
        host->release_bus = octeon_mmc_release_bus;
        host->int_enable = octeon_mmc_int_enable;
        host->set_shared_power = octeon_mmc_set_shared_power;
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
            OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
                host->dmar_fixup = octeon_mmc_dmar_fixup;
                host->dmar_fixup_done = octeon_mmc_dmar_fixup_done;
        }

        host->sys_freq = octeon_get_io_clock_rate();

        if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
                host->big_dma_addr = true;
                host->need_irq_handler_lock = true;
                host->has_ciu3 = true;
                host->use_sg = true;
                /*
                 * First seven are the EMM_INT bits 0..6, then two for
                 * the EMM_DMA_INT bits
                 */
                for (i = 0; i < 9; i++) {
                        mmc_irq[i] = platform_get_irq(pdev, i);
                        if (mmc_irq[i] < 0)
                                return mmc_irq[i];

                        /* work around legacy u-boot device trees */
                        irq_set_irq_type(mmc_irq[i], IRQ_TYPE_EDGE_RISING);
                }
        } else {
                host->big_dma_addr = false;
                host->need_irq_handler_lock = false;
                host->has_ciu3 = false;
                /* First IRQ is the EMM interrupt, the second is the DMA interrupt. */
                for (i = 0; i < 2; i++) {
                        mmc_irq[i] = platform_get_irq(pdev, i);
                        if (mmc_irq[i] < 0)
                                return mmc_irq[i];
                }
        }

        host->last_slot = -1;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "Platform resource[0] is missing\n");
                return -ENXIO;
        }
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
        host->base = (void __iomem *)base;
        host->reg_off = 0;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res) {
                dev_err(&pdev->dev, "Platform resource[1] is missing\n");
                return -EINVAL;
        }
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
        host->dma_base = (void __iomem *)base;
        /*
         * To keep the register addresses shared we intentionally use
         * a negative offset here; the first register used on OCTEON,
         * MIO_EMM_DMA_CFG, therefore starts at offset 0x20.
         */
        host->reg_off_dma = -0x20;

        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                return ret;

        /*
         * Clear out any pending interrupts that may be left over from
         * the bootloader.
         */
        val = readq(host->base + MIO_EMM_INT(host));
        writeq(val, host->base + MIO_EMM_INT(host));

        if (host->has_ciu3) {
                /* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
                for (i = 1; i <= 4; i++) {
                        ret = devm_request_irq(&pdev->dev, mmc_irq[i],
                                               cvm_mmc_interrupt,
                                               0, cvm_mmc_irq_names[i], host);
                        if (ret < 0) {
                                dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
                                        mmc_irq[i]);
                                return ret;
                        }
                }
        } else {
                ret = devm_request_irq(&pdev->dev, mmc_irq[0],
                                       cvm_mmc_interrupt, 0, KBUILD_MODNAME,
                                       host);
                if (ret < 0) {
                        dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
                                mmc_irq[0]);
                        return ret;
                }
        }

        host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
                                                         "power",
                                                         GPIOD_OUT_HIGH);
        if (IS_ERR(host->global_pwr_gpiod)) {
                dev_err(&pdev->dev, "Invalid power GPIO\n");
                return PTR_ERR(host->global_pwr_gpiod);
        }

        platform_set_drvdata(pdev, host);

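        /*
         * Create a platform device for each slot child node and register
         * it with the MMC core. A slot whose child device cannot be
         * created is skipped; a failed slot probe tears everything down.
         */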
        i = 0;
        for_each_child_of_node(node, cn) {
                host->slot_pdev[i] =
                        of_platform_device_create(cn, NULL, &pdev->dev);
                if (!host->slot_pdev[i]) {
                        i++;
                        continue;
                }
                ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
                if (ret) {
                        dev_err(&pdev->dev, "Error populating slots\n");
                        octeon_mmc_set_shared_power(host, 0);
                        goto error;
                }
                i++;
        }
        return 0;

error:
        for (i = 0; i < CAVIUM_MAX_MMC; i++) {
                if (host->slot[i])
                        cvm_mmc_of_slot_remove(host->slot[i]);
                if (host->slot_pdev[i])
                        of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
        }
        return ret;
}

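/*
 * On removal, unregister the slots, disable the shared DMA engine and
 * drop this host's reference on the shared power GPIO.
 */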
static int octeon_mmc_remove(struct platform_device *pdev)
{
        struct cvm_mmc_host *host = platform_get_drvdata(pdev);
        u64 dma_cfg;
        int i;

        for (i = 0; i < CAVIUM_MAX_MMC; i++)
                if (host->slot[i])
                        cvm_mmc_of_slot_remove(host->slot[i]);

        dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
        dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
        writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

        octeon_mmc_set_shared_power(host, 0);
        return 0;
}

static const struct of_device_id octeon_mmc_match[] = {
        {
                .compatible = "cavium,octeon-6130-mmc",
        },
        {
                .compatible = "cavium,octeon-7890-mmc",
        },
        {},
};
MODULE_DEVICE_TABLE(of, octeon_mmc_match);

static struct platform_driver octeon_mmc_driver = {
        .probe          = octeon_mmc_probe,
        .remove         = octeon_mmc_remove,
        .driver         = {
                .name   = KBUILD_MODNAME,
                .of_match_table = octeon_mmc_match,
        },
};

module_platform_driver(octeon_mmc_driver);

MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card");
MODULE_LICENSE("GPL");