/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME         "sun_esp"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_VERSION             "1.100"
#define DRV_MODULE_RELDATE      "August 27, 2008"

#define dma_read32(REG) \
        sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
        sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
        dvmarev0,
        dvmaesc1,
        dvmarev1,
        dvmarev2,
        dvmarev3,
        dvmarevplus,
        dvmahme
};

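/* Map the DVMA controller registers and record which DVMA revision
 * we are talking to, based on the device ID bits in the DMA CSR.
 */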
static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
{
        esp->dma = dma_of;

        esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
                                   resource_size(&dma_of->resource[0]),
                                   "espdma");
        if (!esp->dma_regs)
                return -ENOMEM;

        switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
        case DMA_VERS0:
                esp->dmarev = dvmarev0;
                break;
        case DMA_ESCV1:
                esp->dmarev = dvmaesc1;
                break;
        case DMA_VERS1:
                esp->dmarev = dvmarev1;
                break;
        case DMA_VERS2:
                esp->dmarev = dvmarev2;
                break;
        case DMA_VERHME:
                esp->dmarev = dvmahme;
                break;
        case DMA_VERSPLUS:
                esp->dmarev = dvmarevplus;
                break;
        }

        return 0;
}

static int esp_sbus_map_regs(struct esp *esp, int hme)
{
        struct platform_device *op = esp->dev;
        struct resource *res;

        /* On HME, two reg sets exist, first is DVMA,
         * second is ESP registers.
         */
        if (hme)
                res = &op->resource[1];
        else
                res = &op->resource[0];

        esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
        if (!esp->regs)
                return -ENOMEM;

        return 0;
}

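/* Allocate the small DMA-coherent buffer the ESP core uses for its
 * command block.
 */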
static int esp_sbus_map_command_block(struct esp *esp)
{
        struct platform_device *op = esp->dev;

        esp->command_block = dma_alloc_coherent(&op->dev, 16,
                                                &esp->command_block_dma,
                                                GFP_ATOMIC);
        if (!esp->command_block)
                return -ENOMEM;
        return 0;
}

static int esp_sbus_register_irq(struct esp *esp)
{
        struct Scsi_Host *host = esp->host;
        struct platform_device *op = esp->dev;

        host->irq = op->archdata.irqs[0];
        return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
        struct platform_device *op = esp->dev;
        struct device_node *dp;

        dp = op->dev.of_node;
        esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
        if (esp->scsi_id != 0xff)
                goto done;

        esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
        if (esp->scsi_id != 0xff)
                goto done;

        esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
                                             "scsi-initiator-id", 7);

done:
        esp->host->this_id = esp->scsi_id;
        esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void esp_get_differential(struct esp *esp)
{
        struct platform_device *op = esp->dev;
        struct device_node *dp;

        dp = op->dev.of_node;
        if (of_find_property(dp, "differential", NULL))
                esp->flags |= ESP_FLAG_DIFFERENTIAL;
        else
                esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

static void esp_get_clock_params(struct esp *esp)
{
        struct platform_device *op = esp->dev;
        struct device_node *bus_dp, *dp;
        int fmhz;

        dp = op->dev.of_node;
        bus_dp = dp->parent;

        fmhz = of_getintprop_default(dp, "clock-frequency", 0);
        if (fmhz == 0)
                fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

        esp->cfreq = fmhz;
}

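/* Compute the usable burst sizes as the intersection of the
 * "burst-sizes" properties on the ESP node, the DVMA node, and the
 * DVMA's parent bus node.  If nothing sensible is found, fall back
 * to DMA_BURST32 - 1 (all burst bits below DMA_BURST32).
 */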
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
        struct device_node *dma_dp = dma_of->dev.of_node;
        struct platform_device *op = esp->dev;
        struct device_node *dp;
        u8 bursts, val;

        dp = op->dev.of_node;
        bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
        val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
        if (val != 0xff)
                bursts &= val;

        val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
        if (val != 0xff)
                bursts &= val;

        if (bursts == 0xff ||
            (bursts & DMA_BURST16) == 0 ||
            (bursts & DMA_BURST32) == 0)
                bursts = (DMA_BURST32 - 1);

        esp->bursts = bursts;
}

static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
        esp_get_scsi_id(esp, espdma);
        esp_get_differential(esp);
        esp_get_clock_params(esp);
        esp_get_bursts(esp, espdma);
}

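/* ESP register accessors.  On SBUS the ESP registers are spaced four
 * bytes apart, hence the reg * 4UL offset scaling.
 */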
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
        sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
        return sbus_readb(esp->regs + (reg * 4UL));
}

static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
                                      size_t sz, int dir)
{
        struct platform_device *op = esp->dev;

        return dma_map_single(&op->dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
                                  int num_sg, int dir)
{
        struct platform_device *op = esp->dev;

        return dma_map_sg(&op->dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
                                  size_t sz, int dir)
{
        struct platform_device *op = esp->dev;

        dma_unmap_single(&op->dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
                              int num_sg, int dir)
{
        struct platform_device *op = esp->dev;

        dma_unmap_sg(&op->dev, sg, num_sg, dir);
}

static int sbus_esp_irq_pending(struct esp *esp)
{
        if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
                return 1;
        return 0;
}

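/* Reset the DVMA engine and program an initial CSR value appropriate
 * for the detected DVMA revision, taking the negotiated burst sizes
 * and 64-bit SBUS capability into account.
 */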
static void sbus_esp_reset_dma(struct esp *esp)
{
        int can_do_burst16, can_do_burst32, can_do_burst64;
        int can_do_sbus64, lim;
        struct platform_device *op;
        u32 val;

        can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
        can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
        can_do_burst64 = 0;
        can_do_sbus64 = 0;
        op = esp->dev;
        if (sbus_can_dma_64bit())
                can_do_sbus64 = 1;
        if (sbus_can_burst64())
                can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

        /* Put the DVMA into a known state. */
        if (esp->dmarev != dvmahme) {
                val = dma_read32(DMA_CSR);
                dma_write32(val | DMA_RST_SCSI, DMA_CSR);
                dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
        }
        switch (esp->dmarev) {
        case dvmahme:
                dma_write32(DMA_RESET_FAS366, DMA_CSR);
                dma_write32(DMA_RST_SCSI, DMA_CSR);

                esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
                                        DMA_SCSI_DISAB | DMA_INT_ENAB);

                esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
                                          DMA_BRST_SZ);

                if (can_do_burst64)
                        esp->prev_hme_dmacsr |= DMA_BRST64;
                else if (can_do_burst32)
                        esp->prev_hme_dmacsr |= DMA_BRST32;

                if (can_do_sbus64) {
                        esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
                        sbus_set_sbus64(&op->dev, esp->bursts);
                }

                lim = 1000;
                while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
                        if (--lim == 0) {
                                printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
                                       "will not clear!\n",
                                       esp->host->unique_id);
                                break;
                        }
                        udelay(1);
                }

                dma_write32(0, DMA_CSR);
                dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

                dma_write32(0, DMA_ADDR);
                break;

        case dvmarev2:
                if (esp->rev != ESP100) {
                        val = dma_read32(DMA_CSR);
                        dma_write32(val | DMA_3CLKS, DMA_CSR);
                }
                break;

        case dvmarev3:
                val = dma_read32(DMA_CSR);
                val &= ~DMA_3CLKS;
                val |= DMA_2CLKS;
                if (can_do_burst32) {
                        val &= ~DMA_BRST_SZ;
                        val |= DMA_BRST32;
                }
                dma_write32(val, DMA_CSR);
                break;

        case dvmaesc1:
                val = dma_read32(DMA_CSR);
                val |= DMA_ADD_ENABLE;
                val &= ~DMA_BCNT_ENAB;
                if (!can_do_burst32 && can_do_burst16) {
                        val |= DMA_ESC_BURST;
                } else {
                        val &= ~(DMA_ESC_BURST);
                }
                dma_write32(val, DMA_CSR);
                break;

        default:
                break;
        }

        /* Enable interrupts.  */
        val = dma_read32(DMA_CSR);
        dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

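/* Wait for the DVMA FIFO to finish draining to memory.  HME does not
 * need this; on revisions other than rev3 and ESC1 the drain has to
 * be kicked off explicitly via DMA_FIFO_STDRAIN before polling.
 */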
static void sbus_esp_dma_drain(struct esp *esp)
{
        u32 csr;
        int lim;

        if (esp->dmarev == dvmahme)
                return;

        csr = dma_read32(DMA_CSR);
        if (!(csr & DMA_FIFO_ISDRAIN))
                return;

        if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
                dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

        lim = 1000;
        while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
                if (--lim == 0) {
                        printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
                               esp->host->unique_id);
                        break;
                }
                udelay(1);
        }
}

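/* Invalidate any state left in the DVMA engine between commands.  HME
 * gets a SCSI reset and a rewrite of the cached CSR; older revisions
 * wait for pending reads to finish and then pulse DMA_FIFO_INV.
 */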
static void sbus_esp_dma_invalidate(struct esp *esp)
{
        if (esp->dmarev == dvmahme) {
                dma_write32(DMA_RST_SCSI, DMA_CSR);

                esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
                                         (DMA_PARITY_OFF | DMA_2CLKS |
                                          DMA_SCSI_DISAB | DMA_INT_ENAB)) &
                                        ~(DMA_ST_WRITE | DMA_ENABLE));

                dma_write32(0, DMA_CSR);
                dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

                /* This is necessary to avoid having the SCSI channel
                 * engine lock up on us.
                 */
                dma_write32(0, DMA_ADDR);
        } else {
                u32 val;
                int lim;

                lim = 1000;
                while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
                        if (--lim == 0) {
                                printk(KERN_ALERT PFX "esp%d: DMA will not "
                                       "invalidate!\n", esp->host->unique_id);
                                break;
                        }
                        udelay(1);
                }

                val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
                val |= DMA_FIFO_INV;
                dma_write32(val, DMA_CSR);
                val &= ~DMA_FIFO_INV;
                dma_write32(val, DMA_CSR);
        }
}

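/* Load the transfer count into the ESP, program the DVMA address and
 * direction, and issue the (DMA) command to the chip.  The ordering
 * differs on FASHME, where the ESP command is issued before the DVMA
 * is enabled.
 */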
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
                                  u32 dma_count, int write, u8 cmd)
{
        u32 csr;

        BUG_ON(!(cmd & ESP_CMD_DMA));

        sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
        sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
        if (esp->rev == FASHME) {
                sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
                sbus_esp_write8(esp, 0, FAS_RHI);

                scsi_esp_cmd(esp, cmd);

                csr = esp->prev_hme_dmacsr;
                csr |= DMA_SCSI_DISAB | DMA_ENABLE;
                if (write)
                        csr |= DMA_ST_WRITE;
                else
                        csr &= ~DMA_ST_WRITE;
                esp->prev_hme_dmacsr = csr;

                dma_write32(dma_count, DMA_COUNT);
                dma_write32(addr, DMA_ADDR);
                dma_write32(csr, DMA_CSR);
        } else {
                csr = dma_read32(DMA_CSR);
                csr |= DMA_ENABLE;
                if (write)
                        csr |= DMA_ST_WRITE;
                else
                        csr &= ~DMA_ST_WRITE;
                dma_write32(csr, DMA_CSR);
                if (esp->dmarev == dvmaesc1) {
                        u32 end = PAGE_ALIGN(addr + dma_count + 16U);
                        dma_write32(end - addr, DMA_COUNT);
                }
                dma_write32(addr, DMA_ADDR);

                scsi_esp_cmd(esp, cmd);
        }
}

static int sbus_esp_dma_error(struct esp *esp)
{
        u32 csr = dma_read32(DMA_CSR);

        if (csr & DMA_HNDL_ERROR)
                return 1;

        return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
        .esp_write8     =       sbus_esp_write8,
        .esp_read8      =       sbus_esp_read8,
        .map_single     =       sbus_esp_map_single,
        .map_sg         =       sbus_esp_map_sg,
        .unmap_single   =       sbus_esp_unmap_single,
        .unmap_sg       =       sbus_esp_unmap_sg,
        .irq_pending    =       sbus_esp_irq_pending,
        .reset_dma      =       sbus_esp_reset_dma,
        .dma_drain      =       sbus_esp_dma_drain,
        .dma_invalidate =       sbus_esp_dma_invalidate,
        .send_dma_cmd   =       sbus_esp_send_dma_cmd,
        .dma_error      =       sbus_esp_dma_error,
};

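/* Bring up one ESP host instance: map the DVMA and ESP registers, set
 * up the command block and IRQ, pull configuration out of the firmware
 * properties, and register with the ESP core.
 */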
static int esp_sbus_probe_one(struct platform_device *op,
                              struct platform_device *espdma, int hme)
{
        struct scsi_host_template *tpnt = &scsi_esp_template;
        struct Scsi_Host *host;
        struct esp *esp;
        int err;

        host = scsi_host_alloc(tpnt, sizeof(struct esp));

        err = -ENOMEM;
        if (!host)
                goto fail;

        host->max_id = (hme ? 16 : 8);
        esp = shost_priv(host);

        esp->host = host;
        esp->dev = op;
        esp->ops = &sbus_esp_ops;

        if (hme)
                esp->flags |= ESP_FLAG_WIDE_CAPABLE;

        err = esp_sbus_setup_dma(esp, espdma);
        if (err < 0)
                goto fail_unlink;

        err = esp_sbus_map_regs(esp, hme);
        if (err < 0)
                goto fail_unlink;

        err = esp_sbus_map_command_block(esp);
        if (err < 0)
                goto fail_unmap_regs;

        err = esp_sbus_register_irq(esp);
        if (err < 0)
                goto fail_unmap_command_block;

        esp_sbus_get_props(esp, espdma);

        /* Before we try to touch the ESP chip, ESC1 dma can
         * come up with the reset bit set, so make sure that
         * is clear first.
         */
        if (esp->dmarev == dvmaesc1) {
                u32 val = dma_read32(DMA_CSR);

                dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
        }

        dev_set_drvdata(&op->dev, esp);

        err = scsi_esp_register(esp, &op->dev);
        if (err)
                goto fail_free_irq;

        return 0;

fail_free_irq:
        free_irq(host->irq, esp);
fail_unmap_command_block:
        dma_free_coherent(&op->dev, 16,
                          esp->command_block,
                          esp->command_block_dma);
fail_unmap_regs:
        of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
        scsi_host_put(host);
fail:
        return err;
}

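/* Figure out which DVMA node goes with this ESP.  Plain ESP chips sit
 * under an "espdma"/"dma" parent node, while HME ("SUNW,fas") carries
 * the DVMA registers on the same node.
 */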
static int esp_sbus_probe(struct platform_device *op)
{
        struct device_node *dma_node = NULL;
        struct device_node *dp = op->dev.of_node;
        struct platform_device *dma_of = NULL;
        int hme = 0;

        if (dp->parent &&
            (!strcmp(dp->parent->name, "espdma") ||
             !strcmp(dp->parent->name, "dma")))
                dma_node = dp->parent;
        else if (!strcmp(dp->name, "SUNW,fas")) {
                dma_node = op->dev.of_node;
                hme = 1;
        }
        if (dma_node)
                dma_of = of_find_device_by_node(dma_node);
        if (!dma_of)
                return -ENODEV;

        return esp_sbus_probe_one(op, dma_of, hme);
}

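/* Tear down in reverse order of probe: unregister from the ESP core,
 * mask DVMA interrupts, free the IRQ and command block, and unmap the
 * register areas.
 */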
static int esp_sbus_remove(struct platform_device *op)
{
        struct esp *esp = dev_get_drvdata(&op->dev);
        struct platform_device *dma_of = esp->dma;
        unsigned int irq = esp->host->irq;
        bool is_hme;
        u32 val;

        scsi_esp_unregister(esp);

        /* Disable interrupts.  */
        val = dma_read32(DMA_CSR);
        dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

        free_irq(irq, esp);

        is_hme = (esp->dmarev == dvmahme);

        dma_free_coherent(&op->dev, 16,
                          esp->command_block,
                          esp->command_block_dma);
        of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
                   SBUS_ESP_REG_SIZE);
        of_iounmap(&dma_of->resource[0], esp->dma_regs,
                   resource_size(&dma_of->resource[0]));

        scsi_host_put(esp->host);

        dev_set_drvdata(&op->dev, NULL);

        return 0;
}

static const struct of_device_id esp_match[] = {
        {
                .name = "SUNW,esp",
        },
        {
                .name = "SUNW,fas",
        },
        {
                .name = "esp",
        },
        {},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
        .driver = {
                .name = "esp",
                .owner = THIS_MODULE,
                .of_match_table = esp_match,
        },
        .probe          = esp_sbus_probe,
        .remove         = esp_sbus_remove,
};

static int __init sunesp_init(void)
{
        return platform_driver_register(&esp_sbus_driver);
}

static void __exit sunesp_exit(void)
{
        platform_driver_unregister(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);