/* linux/drivers/scsi/am53c974.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * AMD am53c974 driver.
   4 * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/init.h>
  10#include <linux/delay.h>
  11#include <linux/pci.h>
  12#include <linux/interrupt.h>
  13
  14#include <scsi/scsi_host.h>
  15
  16#include "esp_scsi.h"
  17
#define DRV_MODULE_NAME "am53c974"
#define DRV_MODULE_VERSION "1.00"

/* Module parameters; registered at the bottom of this file. */
static bool am53c974_debug;		/* gates esp_dma_log() output */
static bool am53c974_fenab = true;	/* sets ESP_CONFIG2_FENAB at probe time */

/* Debug logging, always compiled in but gated on am53c974_debug. */
#define esp_dma_log(f, a...)                                            \
	do {                                                            \
		if (am53c974_debug)                                     \
			shost_printk(KERN_DEBUG, esp->host, f, ##a);    \
	} while (0)
  29
/*
 * DMA engine register offsets.  The accessors below scale these by 4,
 * i.e. the registers sit on 32-bit boundaries in the mapped BAR.
 */
#define ESP_DMA_CMD 0x10
#define ESP_DMA_STC 0x11	/* transfer count (written 32-bit) */
#define ESP_DMA_SPA 0x12	/* start address (written 32-bit) */
#define ESP_DMA_WBC 0x13
#define ESP_DMA_WAC 0x14
#define ESP_DMA_STATUS 0x15
#define ESP_DMA_SMDLA 0x16
#define ESP_DMA_WMAC 0x17

/* ESP_DMA_CMD register bits */
#define ESP_DMA_CMD_IDLE 0x00
#define ESP_DMA_CMD_BLAST 0x01
#define ESP_DMA_CMD_ABORT 0x02
#define ESP_DMA_CMD_START 0x03
#define ESP_DMA_CMD_MASK  0x03	/* mask to extract the command field */
#define ESP_DMA_CMD_DIAG 0x04
#define ESP_DMA_CMD_MDL 0x10
#define ESP_DMA_CMD_INTE_P 0x20
#define ESP_DMA_CMD_INTE_D 0x40
#define ESP_DMA_CMD_DIR 0x80

/* ESP_DMA_STATUS register bits */
#define ESP_DMA_STAT_PWDN 0x01
#define ESP_DMA_STAT_ERROR 0x02
#define ESP_DMA_STAT_ABORT 0x04
#define ESP_DMA_STAT_DONE 0x08
#define ESP_DMA_STAT_SCSIINT 0x10
#define ESP_DMA_STAT_BCMPLT 0x20
  56
/* EEPROM is accessed with 16-bit values */
#define DC390_EEPROM_READ 0x80
#define DC390_EEPROM_LEN 0x40	/* 0x40 16-bit words == 128 bytes */

/*
 * DC390 EEPROM
 *
 * 8 * 4 bytes of per-device options
 * followed by HBA specific options
 *
 * The DC390_EE_* offsets below are byte offsets into the 128-byte
 * image read by dc390_read_eeprom().
 */

/* Per-device options */
#define DC390_EE_MODE1 0x00
#define DC390_EE_SPEED 0x01

/* HBA-specific options */
#define DC390_EE_ADAPT_SCSI_ID 0x40
#define DC390_EE_MODE2 0x41
#define DC390_EE_DELAY 0x42
#define DC390_EE_TAG_CMD_NUM 0x43

/* DC390_EE_MODE1 bits */
#define DC390_EE_MODE1_PARITY_CHK   0x01
#define DC390_EE_MODE1_SYNC_NEGO    0x02
#define DC390_EE_MODE1_EN_DISC      0x04
#define DC390_EE_MODE1_SEND_START   0x08
#define DC390_EE_MODE1_TCQ          0x10

/* DC390_EE_MODE2 bits */
#define DC390_EE_MODE2_MORE_2DRV    0x01
#define DC390_EE_MODE2_GREATER_1G   0x02
#define DC390_EE_MODE2_RST_SCSI_BUS 0x04
#define DC390_EE_MODE2_ACTIVE_NEGATION 0x08
#define DC390_EE_MODE2_NO_SEEK      0x10
#define DC390_EE_MODE2_LUN_CHECK    0x20
  90
/* Per-HBA driver state, stored in the PCI device's drvdata. */
struct pci_esp_priv {
	struct esp *esp;	/* back-pointer to the esp_scsi core state */
	u8 dma_status;		/* DMA status latched by pci_esp_irq_pending() */
};

static void pci_esp_dma_drain(struct esp *esp);
  97
  98static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
  99{
 100        return dev_get_drvdata(esp->dev);
 101}
 102
 103static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
 104{
 105        iowrite8(val, esp->regs + (reg * 4UL));
 106}
 107
 108static u8 pci_esp_read8(struct esp *esp, unsigned long reg)
 109{
 110        return ioread8(esp->regs + (reg * 4UL));
 111}
 112
 113static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
 114{
 115        return iowrite32(val, esp->regs + (reg * 4UL));
 116}
 117
 118static int pci_esp_irq_pending(struct esp *esp)
 119{
 120        struct pci_esp_priv *pep = pci_esp_get_priv(esp);
 121
 122        pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS);
 123        esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status);
 124
 125        if (pep->dma_status & (ESP_DMA_STAT_ERROR |
 126                               ESP_DMA_STAT_ABORT |
 127                               ESP_DMA_STAT_DONE |
 128                               ESP_DMA_STAT_SCSIINT))
 129                return 1;
 130
 131        return 0;
 132}
 133
static void pci_esp_reset_dma(struct esp *esp)
{
	/*
	 * Intentionally empty: no explicit reset sequence is performed
	 * for the on-chip DMA engine here (the original author left a
	 * "Nothing to do ?" note, so this is believed sufficient but
	 * has not been proven against the datasheet).
	 */
}
 138
/*
 * pci_esp_dma_drain - flush bytes left in the chip FIFO at the end of
 * a DMA data phase.
 *
 * If the bus is still in a data phase (DIP/DOP) there is nothing to
 * drain.  Otherwise wait (bounded) for the FIFO to empty down to at
 * most one byte, then issue a BLAST command to push out the rest.
 */
static void pci_esp_dma_drain(struct esp *esp)
{
	u8 resid;
	int lim = 1000;


	if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP ||
	    (esp->sreg & ESP_STAT_PMASK) == ESP_DIP)
		/* Data-In or Data-Out, nothing to be done */
		return;

	/* Bounded poll until the FIFO byte count drops to <= 1 */
	while (--lim > 0) {
		resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES;
		if (resid <= 1)
			break;
		cpu_relax();
	}

	/*
	 * When there is a residual BCMPLT will never be set
	 * (obviously). But we still have to issue the BLAST
	 * command, otherwise the data will not being transferred.
	 * But we'll never know when the BLAST operation is
	 * finished. So check for some time and give up eventually.
	 */
	lim = 1000;
	pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD);
	/*
	 * NOTE(review): this loop exits when BCMPLT *clears* (or on
	 * timeout) -- confirm against the datasheet that this polarity
	 * is the intended completion condition for BLAST.
	 */
	while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) {
		if (--lim == 0)
			break;
		cpu_relax();
	}
	pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
	esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid);
	/* BLAST residual handling is currently untested */
	if (WARN_ON_ONCE(resid == 1)) {
		/* flag the leftover byte so the core accounts for it */
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->flags |= ESP_CMD_FLAG_RESIDUAL;
	}
}
 180
 181static void pci_esp_dma_invalidate(struct esp *esp)
 182{
 183        struct pci_esp_priv *pep = pci_esp_get_priv(esp);
 184
 185        esp_dma_log("invalidate DMA\n");
 186
 187        pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
 188        pep->dma_status = 0;
 189}
 190
/*
 * pci_esp_dma_error - inspect the latched DMA status for error/abort
 * conditions.
 *
 * Returns 1 if an error or abort was observed (after moving the DMA
 * engine out of the faulting state), 0 otherwise.
 */
static int pci_esp_dma_error(struct esp *esp)
{
	struct pci_esp_priv *pep = pci_esp_get_priv(esp);

	if (pep->dma_status & ESP_DMA_STAT_ERROR) {
		u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD);

		/* abort a transfer that is still running */
		if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START)
			pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD);

		return 1;
	}
	if (pep->dma_status & ESP_DMA_STAT_ABORT) {
		pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
		/*
		 * NOTE(review): this reads ESP_DMA_CMD (not
		 * ESP_DMA_STATUS) back into dma_status -- looks odd;
		 * confirm it is intentional before changing it.
		 */
		pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD);
		return 1;
	}
	return 0;
}
 210
/*
 * pci_esp_send_dma_cmd - program the ESP core and the DMA engine for a
 * transfer, then start both.
 * @addr:	bus address of the data buffer
 * @esp_count:	transfer length programmed into the ESP counter and STC
 * @dma_count:	DMA-side length; only used for logging here
 * @write:	direction flag (see the inverted-direction note below)
 * @cmd:	ESP command to issue; must have ESP_CMD_DMA set
 *
 * The register programming order (IDLE, counters, STC/SPA, ESP command,
 * DMA START) is deliberate -- do not reorder.
 */
static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				 u32 dma_count, int write, u8 cmd)
{
	struct pci_esp_priv *pep = pci_esp_get_priv(esp);
	u32 val = 0;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	pep->dma_status = 0;

	/* Set DMA engine to IDLE */
	if (write)
		/* DMA write direction logic is inverted */
		val |= ESP_DMA_CMD_DIR;
	pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD);

	/* low/mid transfer count; the high byte only exists with FENAB */
	pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->config2 & ESP_CONFIG2_FENAB)
		pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

	/* DMA engine transfer count and start address */
	pci_esp_write32(esp, esp_count, ESP_DMA_STC);
	pci_esp_write32(esp, addr, ESP_DMA_SPA);

	esp_dma_log("start dma addr[%x] count[%d:%d]\n",
		    addr, esp_count, dma_count);

	scsi_esp_cmd(esp, cmd);
	/* Send DMA Start command */
	pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD);
}
 242
 243static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 244{
 245        int dma_limit = 16;
 246        u32 base, end;
 247
 248        /*
 249         * If CONFIG2_FENAB is set we can
 250         * handle up to 24 bit addresses
 251         */
 252        if (esp->config2 & ESP_CONFIG2_FENAB)
 253                dma_limit = 24;
 254
 255        if (dma_len > (1U << dma_limit))
 256                dma_len = (1U << dma_limit);
 257
 258        /*
 259         * Prevent crossing a 24-bit address boundary.
 260         */
 261        base = dma_addr & ((1U << 24) - 1U);
 262        end = base + dma_len;
 263        if (end > (1U << 24))
 264                end = (1U <<24);
 265        dma_len = end - base;
 266
 267        return dma_len;
 268}
 269
/* Hooks through which the esp_scsi core drives this PCI implementation. */
static const struct esp_driver_ops pci_esp_ops = {
	.esp_write8	=	pci_esp_write8,
	.esp_read8	=	pci_esp_read8,
	.irq_pending	=	pci_esp_irq_pending,
	.reset_dma	=	pci_esp_reset_dma,
	.dma_drain	=	pci_esp_dma_drain,
	.dma_invalidate =	pci_esp_dma_invalidate,
	.send_dma_cmd	=	pci_esp_send_dma_cmd,
	.dma_error	=	pci_esp_dma_error,
	.dma_length_limit =	pci_esp_dma_length_limit,
};
 281
/*
 * Read DC-390 eeprom
 */

/*
 * dc390_eeprom_prepare_read - bit-bang a read opcode out to the serial
 * EEPROM via PCI config register 0x80.
 *
 * carry_flag holds the bit shifted out in the *current* clock cycle;
 * it is primed to 1 so the first cycle emits a start bit, after which
 * the 8 bits of @cmd follow MSB-first (9 cycles total).  The 0x40 /
 * 0xc0 / 0x80 values written to config reg 0x80 are taken as-is from
 * the original driver -- presumably select/clock/data line encodings.
 */
static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
{
	u8 carry_flag = 1, j = 0x80, bval;
	int i;

	for (i = 0; i < 9; i++) {
		if (carry_flag) {
			pci_write_config_byte(pdev, 0x80, 0x40);
			bval = 0xc0;
		} else
			bval = 0x80;

		/* clock the bit out, ~160us settle time per edge */
		udelay(160);
		pci_write_config_byte(pdev, 0x80, bval);
		udelay(160);
		pci_write_config_byte(pdev, 0x80, 0);
		udelay(160);

		/* fetch the next command bit, MSB first */
		carry_flag = (cmd & j) ? 1 : 0;
		j >>= 1;
	}
}
 307
 308static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
 309{
 310        int i;
 311        u16 wval = 0;
 312        u8 bval;
 313
 314        for (i = 0; i < 16; i++) {
 315                wval <<= 1;
 316
 317                pci_write_config_byte(pdev, 0x80, 0x80);
 318                udelay(160);
 319                pci_write_config_byte(pdev, 0x80, 0x40);
 320                udelay(160);
 321                pci_read_config_byte(pdev, 0x00, &bval);
 322
 323                if (bval == 0x22)
 324                        wval |= 1;
 325        }
 326
 327        return wval;
 328}
 329
 330static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
 331{
 332        u8 cmd = DC390_EEPROM_READ, i;
 333
 334        for (i = 0; i < DC390_EEPROM_LEN; i++) {
 335                pci_write_config_byte(pdev, 0xc0, 0);
 336                udelay(160);
 337
 338                dc390_eeprom_prepare_read(pdev, cmd++);
 339                *ptr++ = dc390_eeprom_get_data(pdev);
 340
 341                pci_write_config_byte(pdev, 0x80, 0);
 342                pci_write_config_byte(pdev, 0x80, 0);
 343                udelay(160);
 344        }
 345}
 346
/*
 * dc390_check_eeprom - read the Tekram DC-390 EEPROM and, if its
 * checksum is valid, apply the stored HBA settings to @esp.
 *
 * The image is read and checksummed as 64 16-bit words (the words must
 * sum to 0x1234); individual settings are then pulled out via the
 * byte-wise DC390_EE_* offsets into the same buffer.
 */
static void dc390_check_eeprom(struct esp *esp)
{
	struct pci_dev *pdev = to_pci_dev(esp->dev);
	u8 EEbuf[128];
	u16 *ptr = (u16 *)EEbuf, wval = 0;
	int i;

	dc390_read_eeprom(pdev, ptr);

	/* 16-bit checksum over the whole image */
	for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
		wval += *ptr;

	/* no Tekram EEprom found */
	if (wval != 0x1234) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "No valid Tekram EEprom found\n");
		return;
	}
	esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID];
	esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM];
	if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION)
		esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE;
}
 370
/*
 * pci_esp_probe_one - set up a single am53c974 HBA.
 *
 * Enables the PCI device, maps its registers, allocates the esp_scsi
 * core state and a DMA-coherent command block, wires up the (shared)
 * interrupt and registers the host with the esp_scsi midlayer.  Every
 * failure path unwinds through the goto chain at the bottom, in
 * reverse order of acquisition -- keep new resources in that order.
 */
static int pci_esp_probe_one(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct scsi_host_template *hostt = &scsi_esp_template;
	int err = -ENODEV;
	struct Scsi_Host *shost;
	struct esp *esp;
	struct pci_esp_priv *pep;

	if (pci_enable_device(pdev)) {
		dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n");
		return -ENODEV;
	}

	/* chip can only address 32 bits */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "failed to set 32bit DMA mask\n");
		goto fail_disable_device;
	}

	shost = scsi_host_alloc(hostt, sizeof(struct esp));
	if (!shost) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "failed to allocate scsi host\n");
		err = -ENOMEM;
		goto fail_host_alloc;
	}

	pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL);
	if (!pep) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "failed to allocate esp_priv\n");
		err = -ENOMEM;
		goto fail_host_alloc;
	}

	esp = shost_priv(shost);
	esp->host = shost;
	esp->dev = &pdev->dev;
	esp->ops = &pci_esp_ops;
	/*
	 * The am53c974 HBA has a design flaw of generating
	 * spurious DMA completion interrupts when using
	 * DMA for command submission.
	 */
	esp->flags |= ESP_FLAG_USE_FIFO;
	/*
	 * Enable CONFIG2_FENAB to allow for large DMA transfers
	 */
	if (am53c974_fenab)
		esp->config2 |= ESP_CONFIG2_FENAB;

	pep->esp = esp;

	if (pci_request_regions(pdev, DRV_MODULE_NAME)) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci memory selection failed\n");
		goto fail_priv_alloc;
	}

	esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!esp->regs) {
		dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n");
		err = -EINVAL;
		goto fail_release_regions;
	}
	/* ESP and DMA registers share the same mapping (see reg * 4) */
	esp->dma_regs = esp->regs;

	pci_set_master(pdev);

	esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
			&esp->command_block_dma, GFP_KERNEL);
	if (!esp->command_block) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate command block\n");
		err = -ENOMEM;
		goto fail_unmap_regs;
	}

	pci_set_drvdata(pdev, pep);

	err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED,
			  DRV_MODULE_NAME, esp);
	if (err < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n");
		goto fail_unmap_command_block;
	}

	/* default SCSI id; may be overridden from the EEPROM below */
	esp->scsi_id = 7;
	dc390_check_eeprom(esp);

	shost->this_id = esp->scsi_id;
	shost->max_id = 8;
	shost->irq = pdev->irq;
	shost->io_port = pci_resource_start(pdev, 0);
	shost->n_io_port = pci_resource_len(pdev, 0);
	shost->unique_id = shost->io_port;
	esp->scsi_id_mask = (1 << esp->scsi_id);
	/* Assume 40MHz clock */
	esp->cfreq = 40000000;

	err = scsi_esp_register(esp);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(pdev->irq, esp);
fail_unmap_command_block:
	pci_set_drvdata(pdev, NULL);
	dma_free_coherent(&pdev->dev, 16, esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	pci_iounmap(pdev, esp->regs);
fail_release_regions:
	pci_release_regions(pdev);
fail_priv_alloc:
	kfree(pep);
fail_host_alloc:
	scsi_host_put(shost);
fail_disable_device:
	pci_disable_device(pdev);

	return err;
}
 497
/*
 * pci_esp_remove_one - undo everything pci_esp_probe_one() set up,
 * in reverse order of acquisition.  The host reference is dropped
 * last, after all uses of esp (which lives in the host's private
 * data).
 */
static void pci_esp_remove_one(struct pci_dev *pdev)
{
	struct pci_esp_priv *pep = pci_get_drvdata(pdev);
	struct esp *esp = pep->esp;

	scsi_esp_unregister(esp);
	free_irq(pdev->irq, esp);
	pci_set_drvdata(pdev, NULL);
	dma_free_coherent(&pdev->dev, 16, esp->command_block,
			  esp->command_block_dma);
	pci_iounmap(pdev, esp->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(pep);

	scsi_host_put(esp->host);
}
 515
/* Bind on AMD's vendor ID / SCSI device ID, any subsystem. */
static struct pci_device_id am53c974_pci_tbl[] = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl);

static struct pci_driver am53c974_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= am53c974_pci_tbl,
	.probe		= pci_esp_probe_one,
	.remove		= pci_esp_remove_one,
};

module_pci_driver(am53c974_driver);

MODULE_DESCRIPTION("AM53C974 SCSI driver");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* this driver supersedes the old tmscsim driver */
MODULE_ALIAS("tmscsim");

module_param(am53c974_debug, bool, 0644);
MODULE_PARM_DESC(am53c974_debug, "Enable debugging");

module_param(am53c974_fenab, bool, 0444);
MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");
 543