linux/drivers/scsi/aic94xx/aic94xx_hwi.c
   1/*
   2 * Aic94xx SAS/SATA driver hardware interface.
   3 *
   4 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
   5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
   6 *
   7 * This file is licensed under GPLv2.
   8 *
   9 * This file is part of the aic94xx driver.
  10 *
  11 * The aic94xx driver is free software; you can redistribute it and/or
  12 * modify it under the terms of the GNU General Public License as
  13 * published by the Free Software Foundation; version 2 of the
  14 * License.
  15 *
  16 * The aic94xx driver is distributed in the hope that it will be useful,
  17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  19 * General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with the aic94xx driver; if not, write to the Free Software
  23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  24 *
  25 */
  26
  27#include <linux/pci.h>
  28#include <linux/delay.h>
  29#include <linux/module.h>
  30#include <linux/firmware.h>
  31
  32#include "aic94xx.h"
  33#include "aic94xx_reg.h"
  34#include "aic94xx_hwi.h"
  35#include "aic94xx_seq.h"
  36#include "aic94xx_dump.h"
  37
  38u32 MBAR0_SWB_SIZE;
  39
  40/* ---------- Initialization ---------- */
  41
  42static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
  43{
  44        /* adapter came with a sas address */
  45        if (asd_ha->hw_prof.sas_addr[0])
  46                return 0;
  47
  48        return sas_request_addr(asd_ha->sas_ha.core.shost,
  49                                asd_ha->hw_prof.sas_addr);
  50}
  51
  52static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
  53{
  54        int i;
  55
  56        for (i = 0; i < ASD_MAX_PHYS; i++) {
  57                if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0)
  58                        continue;
  59                /* Skip phys with a zeroed address; give all others
  60                 * the adapter's SAS address. */
  61                ASD_DPRINTK("setting phy%d addr to %llx\n", i,
  62                            SAS_ADDR(asd_ha->hw_prof.sas_addr));
  63                memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr,
  64                       asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
  65        }
  66}
  67
  68/* ---------- PHY initialization ---------- */
  69
  70static void asd_init_phy_identify(struct asd_phy *phy)
  71{
  72        phy->identify_frame = phy->id_frm_tok->vaddr;
  73
  74        memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
  75
  76        phy->identify_frame->dev_type = SAS_END_DEV;
  77        if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
  78                phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
  79        if (phy->sas_phy.role & PHY_ROLE_TARGET)
  80                phy->identify_frame->target_bits = phy->sas_phy.tproto;
  81        memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr,
  82               SAS_ADDR_SIZE);
  83        phy->identify_frame->phy_id = phy->sas_phy.id;
  84}
  85
  86static int asd_init_phy(struct asd_phy *phy)
  87{
  88        struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
  89        struct asd_sas_phy *sas_phy = &phy->sas_phy;
  90
  91        sas_phy->enabled = 1;
  92        sas_phy->class = SAS;
  93        sas_phy->iproto = SAS_PROTOCOL_ALL;
  94        sas_phy->tproto = 0;
  95        sas_phy->type = PHY_TYPE_PHYSICAL;
  96        sas_phy->role = PHY_ROLE_INITIATOR;
  97        sas_phy->oob_mode = OOB_NOT_CONNECTED;
  98        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
  99
 100        phy->id_frm_tok = asd_alloc_coherent(asd_ha,
 101                                             sizeof(*phy->identify_frame),
 102                                             GFP_KERNEL);
 103        if (!phy->id_frm_tok) {
 104                asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id);
 105                return -ENOMEM;
 106        } else
 107                asd_init_phy_identify(phy);
 108
 109        memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd));
 110
 111        return 0;
 112}
 113
 114static void asd_init_ports(struct asd_ha_struct *asd_ha)
 115{
 116        int i;
 117
 118        spin_lock_init(&asd_ha->asd_ports_lock);
 119        for (i = 0; i < ASD_MAX_PHYS; i++) {
 120                struct asd_port *asd_port = &asd_ha->asd_ports[i];
 121
 122                memset(asd_port->sas_addr, 0, SAS_ADDR_SIZE);
 123                memset(asd_port->attached_sas_addr, 0, SAS_ADDR_SIZE);
 124                asd_port->phy_mask = 0;
 125                asd_port->num_phys = 0;
 126        }
 127}
 128
 129static int asd_init_phys(struct asd_ha_struct *asd_ha)
 130{
 131        u8 i;
 132        u8 phy_mask = asd_ha->hw_prof.enabled_phys;
 133
 134        for (i = 0; i < ASD_MAX_PHYS; i++) {
 135                struct asd_phy *phy = &asd_ha->phys[i];
 136
 137                phy->phy_desc = &asd_ha->hw_prof.phy_desc[i];
 138                phy->asd_port = NULL;
 139
 140                phy->sas_phy.enabled = 0;
 141                phy->sas_phy.id = i;
 142                phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0];
 143                phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0];
 144                phy->sas_phy.ha = &asd_ha->sas_ha;
 145                phy->sas_phy.lldd_phy = phy;
 146        }
 147
 148        /* Now enable and initialize only the enabled phys. */
 149        for_each_phy(phy_mask, phy_mask, i) {
 150                int err = asd_init_phy(&asd_ha->phys[i]);
 151                if (err)
 152                        return err;
 153        }
 154
 155        return 0;
 156}
 157
 158/* ---------- Sliding windows ---------- */
 159
 160static int asd_init_sw(struct asd_ha_struct *asd_ha)
 161{
 162        struct pci_dev *pcidev = asd_ha->pcidev;
 163        int err;
 164        u32 v;
 165
 166        /* Unlock MBARs */
 167        err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v);
 168        if (err) {
 169                asd_printk("couldn't access conf. space of %s\n",
 170                           pci_name(pcidev));
 171                goto Err;
 172        }
 173        if (v)
 174                err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v);
 175        if (err) {
 176                asd_printk("couldn't write to MBAR_KEY of %s\n",
 177                           pci_name(pcidev));
 178                goto Err;
 179        }
 180
 181        /* Set sliding windows A, B and C to point to proper internal
 182         * memory regions.
 183         */
 184        pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR);
 185        pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB,
 186                               REG_BASE_ADDR_CSEQCIO);
 187        pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI);
 188        asd_ha->io_handle[0].swa_base = REG_BASE_ADDR;
 189        asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO;
 190        asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI;
 191        MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80;
 192        if (!asd_ha->iospace) {
 193                /* MBAR1 will point to OCM (On Chip Memory) */
 194                pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR);
 195                asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR;
 196        }
 197        spin_lock_init(&asd_ha->iolock);
 198Err:
 199        return err;
 200}
 201
 202/* ---------- SCB initialization ---------- */
 203
 204/**
 205 * asd_init_scbs - manually allocate the first SCB.
 206 * @asd_ha: pointer to host adapter structure
 207 *
 208 * This allocates the very first SCB which would be sent to the
 209 * sequencer for execution.  Its bus address is written to
 210 * CSEQ_Q_NEW_POINTER, mode page 2, mode 8.  Since the bus address of
 211 * the _next_ scb to be DMA-ed to the host adapter is read from the last
 212 * SCB DMA-ed to the host adapter, we have to always stay one step
 213 * ahead of the sequencer and keep one SCB already allocated.
 214 */
 215static int asd_init_scbs(struct asd_ha_struct *asd_ha)
 216{
 217        struct asd_seq_data *seq = &asd_ha->seq;
 218        int bitmap_bytes;
 219
 220        /* allocate the index array and bitmap */
 221        asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
 222        asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits*
 223                                             sizeof(void *), GFP_KERNEL);
 224        if (!asd_ha->seq.tc_index_array)
 225                return -ENOMEM;
 226
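            /* The kernel wants bitmaps to be unsigned long sized. */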
 227        bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
 228        bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
 229        asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
 230        if (!asd_ha->seq.tc_index_bitmap)
 231                return -ENOMEM;
 232
 233        spin_lock_init(&seq->tc_index_lock);
 234
 235        seq->next_scb.size = sizeof(struct scb);
 236        seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
 237                                             &seq->next_scb.dma_handle);
 238        if (!seq->next_scb.vaddr) {
 239                kfree(asd_ha->seq.tc_index_bitmap);
 240                kfree(asd_ha->seq.tc_index_array);
 241                asd_ha->seq.tc_index_bitmap = NULL;
 242                asd_ha->seq.tc_index_array = NULL;
 243                return -ENOMEM;
 244        }
 245
 246        seq->pending = 0;
 247        spin_lock_init(&seq->pend_q_lock);
 248        INIT_LIST_HEAD(&seq->pend_q);
 249
 250        return 0;
 251}
 252
 253static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
 254{
 255        asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
 256        asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
 257        ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n",
 258                    asd_ha->hw_prof.max_scbs,
 259                    asd_ha->hw_prof.max_ddbs);
 260}
 261
 262/* ---------- Done List initialization ---------- */
 263
 264static void asd_dl_tasklet_handler(unsigned long);
 265
 266static int asd_init_dl(struct asd_ha_struct *asd_ha)
 267{
 268        asd_ha->seq.actual_dl
 269                = asd_alloc_coherent(asd_ha,
 270                             ASD_DL_SIZE * sizeof(struct done_list_struct),
 271                                     GFP_KERNEL);
 272        if (!asd_ha->seq.actual_dl)
 273                return -ENOMEM;
 274        asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
 275        asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE;
 276        asd_ha->seq.dl_next = 0;
 277        tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler,
 278                     (unsigned long) asd_ha);
 279
 280        return 0;
 281}
 282
 283/* ---------- EDB and ESCB init ---------- */
 284
 285static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, gfp_t gfp_flags)
 286{
 287        struct asd_seq_data *seq = &asd_ha->seq;
 288        int i;
 289
 290        seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags);
 291        if (!seq->edb_arr)
 292                return -ENOMEM;
 293
 294        for (i = 0; i < seq->num_edbs; i++) {
 295                seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE,
 296                                                     gfp_flags);
 297                if (!seq->edb_arr[i])
 298                        goto Err_unroll;
 299                memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
 300        }
 301
 302        ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs);
 303
 304        return 0;
 305
 306Err_unroll:
 307        for (i-- ; i >= 0; i--)
 308                asd_free_coherent(asd_ha, seq->edb_arr[i]);
 309        kfree(seq->edb_arr);
 310        seq->edb_arr = NULL;
 311
 312        return -ENOMEM;
 313}
 314
 315static int asd_alloc_escbs(struct asd_ha_struct *asd_ha,
 316                           gfp_t gfp_flags)
 317{
 318        struct asd_seq_data *seq = &asd_ha->seq;
 319        struct asd_ascb *escb;
 320        int i, escbs;
 321
 322        seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr),
 323                                gfp_flags);
 324        if (!seq->escb_arr)
 325                return -ENOMEM;
 326
 327        escbs = seq->num_escbs;
 328        escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags);
 329        if (!escb) {
 330                asd_printk("couldn't allocate list of escbs\n");
 331                goto Err;
 332        }
 333        seq->num_escbs -= escbs;  /* subtract what was not allocated */
 334        ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs);
 335
 336        for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next,
 337                                                               struct asd_ascb,
 338                                                               list)) {
 339                seq->escb_arr[i] = escb;
 340                escb->scb->header.opcode = EMPTY_SCB;
 341        }
 342
 343        return 0;
 344Err:
 345        kfree(seq->escb_arr);
 346        seq->escb_arr = NULL;
 347        return -ENOMEM;
 348
 349}
 350
 351static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha)
 352{
 353        struct asd_seq_data *seq = &asd_ha->seq;
 354        int i, k, z = 0;
 355
 356        for (i = 0; i < seq->num_escbs; i++) {
 357                struct asd_ascb *ascb = seq->escb_arr[i];
 358                struct empty_scb *escb = &ascb->scb->escb;
 359
 360                ascb->edb_index = z;
 361
 362                escb->num_valid = ASD_EDBS_PER_SCB;
 363
 364                for (k = 0; k < ASD_EDBS_PER_SCB; k++) {
 365                        struct sg_el *eb = &escb->eb[k];
 366                        struct asd_dma_tok *edb = seq->edb_arr[z++];
 367
 368                        memset(eb, 0, sizeof(*eb));
 369                        eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle));
 370                        eb->size = cpu_to_le32(((u32) edb->size));
 371                }
 372        }
 373}
 374
 375/**
 376 * asd_init_escbs -- allocate and initialize empty scbs
 377 * @asd_ha: pointer to host adapter structure
 378 *
 379 * An empty SCB carries ASD_EDBS_PER_SCB (7) sg_element buffers.
 380 * They transport sense data, etc.
 381 */
 382static int asd_init_escbs(struct asd_ha_struct *asd_ha)
 383{
 384        struct asd_seq_data *seq = &asd_ha->seq;
 385        int err = 0;
 386
 387        /* Allocate two empty data buffers (edb) per sequencer. */
 388        int edbs = 2*(1+asd_ha->hw_prof.num_phys);
 389
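            /* Round up so that each empty SCB carries a full set of
             * ASD_EDBS_PER_SCB buffers. */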
 390        seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB;
 391        seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB;
 392
 393        err = asd_alloc_edbs(asd_ha, GFP_KERNEL);
 394        if (err) {
 395                asd_printk("couldn't allocate edbs\n");
 396                return err;
 397        }
 398
 399        err = asd_alloc_escbs(asd_ha, GFP_KERNEL);
 400        if (err) {
 401                asd_printk("couldn't allocate escbs\n");
 402                return err;
 403        }
 404
 405        asd_assign_edbs2escbs(asd_ha);
 406        /* In order to ensure that normal SCBs do not overfill sequencer
 407         * memory and leave no space for escbs (halting condition),
 408         * we increment pending here by the number of escbs.  However,
 409         * escbs are never pending.
 410         */
 411        seq->pending   = seq->num_escbs;
 412        seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2;
 413
 414        return 0;
 415}
 416
 417/* ---------- HW initialization ---------- */
 418
 419/**
 420 * asd_chip_hardrst -- hard reset the chip
 421 * @asd_ha: pointer to host adapter structure
 422 *
 423 * This takes 16 cycles and is synchronous to CFCLK, which runs
 424 * at 200 MHz, so this should take at most 80 nanoseconds.
 425 */
 426int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
 427{
 428        int i;
 429        int count = 100;
 430        u32 reg;
 431
 432        for (i = 0 ; i < 4 ; i++) {
 433                asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
 434        }
 435
 436        do {
 437                udelay(1);
 438                reg = asd_read_reg_dword(asd_ha, CHIMINT);
 439                if (reg & HARDRSTDET) {
 440                        asd_write_reg_dword(asd_ha, CHIMINT,
 441                                            HARDRSTDET|PORRSTDET);
 442                        return 0;
 443                }
 444        } while (--count > 0);
 445
 446        return -ENODEV;
 447}
 448
 449/**
 450 * asd_init_chip -- initialize the chip
 451 * @asd_ha: pointer to host adapter structure
 452 *
 453 * Hard resets the chip, disables HA interrupts, downloads the sequencer
 454 * microcode and starts the sequencers.  The caller has to explicitly
 455 * enable HA interrupts with asd_enable_ints(asd_ha).
 456 */
 457static int asd_init_chip(struct asd_ha_struct *asd_ha)
 458{
 459        int err;
 460
 461        err = asd_chip_hardrst(asd_ha);
 462        if (err) {
 463                asd_printk("couldn't hard reset %s\n",
 464                            pci_name(asd_ha->pcidev));
 465                goto out;
 466        }
 467
 468        asd_disable_ints(asd_ha);
 469
 470        err = asd_init_seqs(asd_ha);
 471        if (err) {
 472                asd_printk("couldn't init seqs for %s\n",
 473                           pci_name(asd_ha->pcidev));
 474                goto out;
 475        }
 476
 477        err = asd_start_seqs(asd_ha);
 478        if (err) {
 479                asd_printk("couldn't start seqs for %s\n",
 480                           pci_name(asd_ha->pcidev));
 481                goto out;
 482        }
 483out:
 484        return err;
 485}
 486
 487#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))
 488
 489static int max_devs = 0;
 490module_param_named(max_devs, max_devs, int, S_IRUGO);
 491MODULE_PARM_DESC(max_devs, "\n"
 492        "\tMaximum number of SAS devices to support (not LUs).\n"
 493        "\tDefault: 2176, Maximum: 65663.\n");
 494
 495static int max_cmnds = 0;
 496module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
 497MODULE_PARM_DESC(max_cmnds, "\n"
 498        "\tMaximum number of commands queueable.\n"
 499        "\tDefault: 512, Maximum: 66047.\n");
 500
 501static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
 502{
 503        unsigned long dma_addr = OCM_BASE_ADDR;
 504        u32 d;
 505
 506        dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
 507        asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
 508        d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
 509        d |= 4;
 510        asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
 511        asd_ha->hw_prof.max_ddbs += MAX_DEVS;
 512}
 513
 514static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
 515{
 516        dma_addr_t dma_handle;
 517        unsigned long dma_addr;
 518        u32 d;
 519        int size;
 520
 521        asd_extend_devctx_ocm(asd_ha);
 522
 523        asd_ha->hw_prof.ddb_ext = NULL;
 524        if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
 525                max_devs = asd_ha->hw_prof.max_ddbs;
 526                return 0;
 527        }
 528
 529        size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;
 530
 531        asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
 532        if (!asd_ha->hw_prof.ddb_ext) {
 533                asd_printk("couldn't allocate memory for %d devices\n",
 534                           max_devs);
 535                max_devs = asd_ha->hw_prof.max_ddbs;
 536                return -ENOMEM;
 537        }
 538        dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
 539        dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
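            /* Bias the base down by the existing DDB range, presumably so
             * that DDB indexes beyond hw_prof.max_ddbs land in this buffer. */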
 540        dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
 541        dma_handle = (dma_addr_t) dma_addr;
 542        asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
 543        d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
 544        d &= ~4;
 545        asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
 546
 547        asd_ha->hw_prof.max_ddbs = max_devs;
 548
 549        return 0;
 550}
 551
 552static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
 553{
 554        dma_addr_t dma_handle;
 555        unsigned long dma_addr;
 556        u32 d;
 557        int size;
 558
 559        asd_ha->hw_prof.scb_ext = NULL;
 560        if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
 561                max_cmnds = asd_ha->hw_prof.max_scbs;
 562                return 0;
 563        }
 564
 565        size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;
 566
 567        asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
 568        if (!asd_ha->hw_prof.scb_ext) {
 569                asd_printk("couldn't allocate memory for %d commands\n",
 570                           max_cmnds);
 571                max_cmnds = asd_ha->hw_prof.max_scbs;
 572                return -ENOMEM;
 573        }
 574        dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
 575        dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
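            /* Same base-biasing scheme as asd_extend_devctx(), here for the
             * SCB (command context) memory. */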
 576        dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
 577        dma_handle = (dma_addr_t) dma_addr;
 578        asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
 579        d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
 580        d &= ~1;
 581        asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
 582
 583        asd_ha->hw_prof.max_scbs = max_cmnds;
 584
 585        return 0;
 586}
 587
 588/**
 589 * asd_init_ctxmem -- initialize context memory
 590 * @asd_ha: pointer to host adapter structure
 591 *
 592 * This function sets the maximum number of SCBs and
 593 * DDBs which can be used by the sequencer.  This is normally
 594 * 512 and 128 respectively.  If support for more SCBs or more DDBs
 595 * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
 596 * initialized here to extend context memory to point to host memory,
 597 * thus allowing unlimited support for SCBs and DDBs -- only limited
 598 * by host memory.
 599 */
 600static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
 601{
 602        int bitmap_bytes;
 603
 604        asd_get_max_scb_ddb(asd_ha);
 605        asd_extend_devctx(asd_ha);
 606        asd_extend_cmdctx(asd_ha);
 607
 608        /* The kernel wants bitmaps to be unsigned long sized. */
 609        bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
 610        bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
 611        asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
 612        if (!asd_ha->hw_prof.ddb_bitmap)
 613                return -ENOMEM;
 614        spin_lock_init(&asd_ha->hw_prof.ddb_lock);
 615
 616        return 0;
 617}
 618
 619int asd_init_hw(struct asd_ha_struct *asd_ha)
 620{
 621        int err;
 622        u32 v;
 623
 624        err = asd_init_sw(asd_ha);
 625        if (err)
 626                return err;
 627
 628        err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
 629        if (err) {
 630                asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
 631                           pci_name(asd_ha->pcidev));
 632                return err;
 633        }
 634        err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
 635                                     v | SC_TMR_DIS);
 636        if (err) {
 637                asd_printk("couldn't disable split completion timer of %s\n",
 638                           pci_name(asd_ha->pcidev));
 639                return err;
 640        }
 641
 642        err = asd_read_ocm(asd_ha);
 643        if (err) {
 644                asd_printk("couldn't read ocm(%d)\n", err);
 645                /* While suspicious, it is not an error that we
 646                 * couldn't read the OCM. */
 647        }
 648
 649        err = asd_read_flash(asd_ha);
 650        if (err) {
 651                asd_printk("couldn't read flash(%d)\n", err);
 652                /* While suspicious, it is not an error that we
 653                 * couldn't read FLASH memory.
 654                 */
 655        }
 656
 657        asd_init_ctxmem(asd_ha);
 658
 659        if (asd_get_user_sas_addr(asd_ha)) {
 660                asd_printk("No SAS Address provided for %s\n",
 661                           pci_name(asd_ha->pcidev));
 662                err = -ENODEV;
 663                goto Out;
 664        }
 665
 666        asd_propagate_sas_addr(asd_ha);
 667
 668        err = asd_init_phys(asd_ha);
 669        if (err) {
 670                asd_printk("couldn't initialize phys for %s\n",
 671                            pci_name(asd_ha->pcidev));
 672                goto Out;
 673        }
 674
 675        asd_init_ports(asd_ha);
 676
 677        err = asd_init_scbs(asd_ha);
 678        if (err) {
 679                asd_printk("couldn't initialize scbs for %s\n",
 680                            pci_name(asd_ha->pcidev));
 681                goto Out;
 682        }
 683
 684        err = asd_init_dl(asd_ha);
 685        if (err) {
 686                asd_printk("couldn't initialize the done list:%d\n",
 687                            err);
 688                goto Out;
 689        }
 690
 691        err = asd_init_escbs(asd_ha);
 692        if (err) {
 693                asd_printk("couldn't initialize escbs\n");
 694                goto Out;
 695        }
 696
 697        err = asd_init_chip(asd_ha);
 698        if (err) {
 699                asd_printk("couldn't init the chip\n");
 700                goto Out;
 701        }
 702Out:
 703        return err;
 704}
 705
 706/* ---------- Chip reset ---------- */
 707
 708/**
 709 * asd_chip_reset -- reset the host adapter, etc
 710 * @asd_ha: pointer to host adapter structure of interest
 711 *
 712 * Called from the ISR.  Hard reset the chip.  Let everything
 713 * timeout.  This should be no different than hot-unplugging the
 714 * host adapter.  Once everything times out we'll init the chip with
 715 * a call to asd_init_chip() and enable interrupts with asd_enable_ints().
 716 * XXX finish.
 717 */
 718static void asd_chip_reset(struct asd_ha_struct *asd_ha)
 719{
 720        struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
 721
 722        ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
 723        asd_chip_hardrst(asd_ha);
 724        sas_ha->notify_ha_event(sas_ha, HAE_RESET);
 725}
 726
 727/* ---------- Done List Routines ---------- */
 728
 729static void asd_dl_tasklet_handler(unsigned long data)
 730{
 731        struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data;
 732        struct asd_seq_data *seq = &asd_ha->seq;
 733        unsigned long flags;
 734
 735        while (1) {
 736                struct done_list_struct *dl = &seq->dl[seq->dl_next];
 737                struct asd_ascb *ascb;
 738
 739                if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle)
 740                        break;
 741
 742                /* find the aSCB */
 743                spin_lock_irqsave(&seq->tc_index_lock, flags);
 744                ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index));
 745                spin_unlock_irqrestore(&seq->tc_index_lock, flags);
 746                if (unlikely(!ascb)) {
 747                        ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n");
 748                        goto next_1;
 749                } else if (ascb->scb->header.opcode == EMPTY_SCB) {
 750                        goto out;
 751                } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) {
 752                        goto next_1;
 753                }
 754                spin_lock_irqsave(&seq->pend_q_lock, flags);
 755                list_del_init(&ascb->list);
 756                seq->pending--;
 757                spin_unlock_irqrestore(&seq->pend_q_lock, flags);
 758        out:
 759                ascb->tasklet_complete(ascb, dl);
 760
 761        next_1:
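                    /* Advance to the next done list entry; when the index
                     * wraps around, the expected toggle bit flips. */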
 762                seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1);
 763                if (!seq->dl_next)
 764                        seq->dl_toggle ^= DL_TOGGLE_MASK;
 765        }
 766}
 767
 768/* ---------- Interrupt Service Routines ---------- */
 769
 770/**
 771 * asd_process_donelist_isr -- schedule processing of done list entries
 772 * @asd_ha: pointer to host adapter structure
 773 */
 774static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
 775{
 776        tasklet_schedule(&asd_ha->seq.dl_tasklet);
 777}
 778
 779/**
 780 * asd_com_sas_isr -- process device communication interrupt (COMINT)
 781 * @asd_ha: pointer to host adapter structure
 782 */
 783static void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
 784{
 785        u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT);
 786
 787        /* clear COMSTAT int */
 788        asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF);
 789
 790        if (comstat & CSBUFPERR) {
 791                asd_printk("%s: command/status buffer dma parity error\n",
 792                           pci_name(asd_ha->pcidev));
 793        } else if (comstat & CSERR) {
 794                int i;
 795                u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
 796                dmaerr &= 0xFF;
 797                asd_printk("%s: command/status dma error, DMAERR: 0x%02x, "
 798                           "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n",
 799                           pci_name(asd_ha->pcidev),
 800                           dmaerr,
 801                           asd_read_reg_dword(asd_ha, CSDMAADR),
 802                           asd_read_reg_dword(asd_ha, CSDMAADR+4));
 803                asd_printk("CSBUFFER:\n");
 804                for (i = 0; i < 8; i++) {
 805                        asd_printk("%08x %08x %08x %08x\n",
 806                                   asd_read_reg_dword(asd_ha, CSBUFFER),
 807                                   asd_read_reg_dword(asd_ha, CSBUFFER+4),
 808                                   asd_read_reg_dword(asd_ha, CSBUFFER+8),
 809                                   asd_read_reg_dword(asd_ha, CSBUFFER+12));
 810                }
 811                asd_dump_seq_state(asd_ha, 0);
 812        } else if (comstat & OVLYERR) {
 813                u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
 814                dmaerr = (dmaerr >> 8) & 0xFF;
 815                asd_printk("%s: overlay dma error:0x%x\n",
 816                           pci_name(asd_ha->pcidev),
 817                           dmaerr);
 818        }
 819        asd_chip_reset(asd_ha);
 820}
 821
 822static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
 823{
 824        static const char *halt_code[256] = {
 825                "UNEXPECTED_INTERRUPT0",
 826                "UNEXPECTED_INTERRUPT1",
 827                "UNEXPECTED_INTERRUPT2",
 828                "UNEXPECTED_INTERRUPT3",
 829                "UNEXPECTED_INTERRUPT4",
 830                "UNEXPECTED_INTERRUPT5",
 831                "UNEXPECTED_INTERRUPT6",
 832                "UNEXPECTED_INTERRUPT7",
 833                "UNEXPECTED_INTERRUPT8",
 834                "UNEXPECTED_INTERRUPT9",
 835                "UNEXPECTED_INTERRUPT10",
 836                [11 ... 19] = "unknown[11,19]",
 837                "NO_FREE_SCB_AVAILABLE",
 838                "INVALID_SCB_OPCODE",
 839                "INVALID_MBX_OPCODE",
 840                "INVALID_ATA_STATE",
 841                "ATA_QUEUE_FULL",
 842                "ATA_TAG_TABLE_FAULT",
 843                "ATA_TAG_MASK_FAULT",
 844                "BAD_LINK_QUEUE_STATE",
 845                "DMA2CHIM_QUEUE_ERROR",
 846                "EMPTY_SCB_LIST_FULL",
 847                "unknown[30]",
 848                "IN_USE_SCB_ON_FREE_LIST",
 849                "BAD_OPEN_WAIT_STATE",
 850                "INVALID_STP_AFFILIATION",
 851                "unknown[34]",
 852                "EXEC_QUEUE_ERROR",
 853                "TOO_MANY_EMPTIES_NEEDED",
 854                "EMPTY_REQ_QUEUE_ERROR",
 855                "Q_MONIRTT_MGMT_ERROR",
 856                "TARGET_MODE_FLOW_ERROR",
 857                "DEVICE_QUEUE_NOT_FOUND",
 858                "START_IRTT_TIMER_ERROR",
 859                "ABORT_TASK_ILLEGAL_REQ",
 860                [43 ... 255] = "unknown[43,255]"
 861        };
 862
 863        if (dchstatus & CSEQINT) {
 864                u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT);
 865
 866                if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) {
 867                        asd_printk("%s: CSEQ arp2int:0x%x\n",
 868                                   pci_name(asd_ha->pcidev),
 869                                   arp2int);
 870                } else if (arp2int & ARP2HALTC)
 871                        asd_printk("%s: CSEQ halted: %s\n",
 872                                   pci_name(asd_ha->pcidev),
 873                                   halt_code[(arp2int>>16)&0xFF]);
 874                else
 875                        asd_printk("%s: CARP2INT:0x%x\n",
 876                                   pci_name(asd_ha->pcidev),
 877                                   arp2int);
 878        }
 879        if (dchstatus & LSEQINT_MASK) {
 880                int lseq;
 881                u8  lseq_mask = dchstatus & LSEQINT_MASK;
 882
 883                for_each_sequencer(lseq_mask, lseq_mask, lseq) {
 884                        u32 arp2int = asd_read_reg_dword(asd_ha,
 885                                                         LmARP2INT(lseq));
 886                        if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR
 887                                       | ARP2CIOPERR)) {
 888                                asd_printk("%s: LSEQ%d arp2int:0x%x\n",
 889                                           pci_name(asd_ha->pcidev),
 890                                           lseq, arp2int);
 891                                /* XXX we should only do lseq reset */
 892                        } else if (arp2int & ARP2HALTC)
 893                                asd_printk("%s: LSEQ%d halted: %s\n",
 894                                           pci_name(asd_ha->pcidev),
 895                                           lseq, halt_code[(arp2int>>16)&0xFF]);
 896                        else
 897                                asd_printk("%s: LSEQ%d ARP2INT:0x%x\n",
 898                                           pci_name(asd_ha->pcidev), lseq,
 899                                           arp2int);
 900                }
 901        }
 902        asd_chip_reset(asd_ha);
 903}
 904
 905/**
 906 * asd_dch_sas_isr -- process device channel interrupt (DEVINT)
 907 * @asd_ha: pointer to host adapter structure
 908 */
 909static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
 910{
 911        u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);
 912
 913        if (dchstatus & CFIFTOERR) {
 914                asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev));
 915                asd_chip_reset(asd_ha);
 916        } else
 917                asd_arp2_err(asd_ha, dchstatus);
 918}
 919
 920/**
 921 * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
 922 * @asd_ha: pointer to host adapter structure
 923 */
 924static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
 925{
 926        u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);
 927
 928        if (!(stat0r & ASIERR)) {
 929                asd_printk("hmm, EXSI interrupted but no error?\n");
 930                return;
 931        }
 932
 933        if (stat0r & ASIFMTERR) {
 934                asd_printk("ASI SEEPROM format error for %s\n",
 935                           pci_name(asd_ha->pcidev));
 936        } else if (stat0r & ASISEECHKERR) {
 937                u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R);
 938                asd_printk("ASI SEEPROM checksum 0x%x error for %s\n",
 939                           stat1r & CHECKSUM_MASK,
 940                           pci_name(asd_ha->pcidev));
 941        } else {
 942                u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR);
 943
 944                if (!(statr & CPI2ASIMSTERR_MASK)) {
 945                        ASD_DPRINTK("hmm, ASIERR?\n");
 946                        return;
 947                } else {
 948                        u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR);
 949                        u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR);
 950
 951                        asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, "
 952                                   "count: 0x%x, byteen: 0x%x, targerr: 0x%x "
 953                                   "master id: 0x%x, master err: 0x%x\n",
 954                                   pci_name(asd_ha->pcidev),
 955                                   addr, data,
 956                                   (statr & CPI2ASIBYTECNT_MASK) >> 16,
 957                                   (statr & CPI2ASIBYTEEN_MASK) >> 12,
 958                                   (statr & CPI2ASITARGERR_MASK) >> 8,
 959                                   (statr & CPI2ASITARGMID_MASK) >> 4,
 960                                   (statr & CPI2ASIMSTERR_MASK));
 961                }
 962        }
 963        asd_chip_reset(asd_ha);
 964}
 965
 966/**
 967 * asd_hst_pcix_isr -- process host interface interrupts
 968 * @asd_ha: pointer to host adapter structure
 969 *
 970 * Asserted on PCIX errors: target abort, etc.
 971 */
 972static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
 973{
 974        u16 status;
 975        u32 pcix_status;
 976        u32 ecc_status;
 977
 978        pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status);
 979        pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status);
 980        pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status);
 981
 982        if (status & PCI_STATUS_DETECTED_PARITY)
 983                asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev));
 984        else if (status & PCI_STATUS_REC_MASTER_ABORT)
 985                asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev));
 986        else if (status & PCI_STATUS_REC_TARGET_ABORT)
 987                asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev));
 988        else if (status & PCI_STATUS_PARITY)
 989                asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev));
 990        else if (pcix_status & RCV_SCE) {
 991                asd_printk("received split completion error for %s\n",
 992                           pci_name(asd_ha->pcidev));
 993                pci_write_config_dword(asd_ha->pcidev, PCIX_STATUS, pcix_status);
 994                /* XXX: Abort task? */
 995                return;
 996        } else if (pcix_status & UNEXP_SC) {
 997                asd_printk("unexpected split completion for %s\n",
 998                           pci_name(asd_ha->pcidev));
 999                pci_write_config_dword(asd_ha->pcidev, PCIX_STATUS, pcix_status);
1000                /* ignore */
1001                return;
1002        } else if (pcix_status & SC_DISCARD)
1003                asd_printk("split completion discarded for %s\n",
1004                           pci_name(asd_ha->pcidev));
1005        else if (ecc_status & UNCOR_ECCERR)
1006                asd_printk("uncorrectable ECC error for %s\n",
1007                           pci_name(asd_ha->pcidev));
1008        asd_chip_reset(asd_ha);
1009}
1010
1011/**
1012 * asd_hw_isr -- host adapter interrupt service routine
1013 * @irq: ignored
1014 * @dev_id: pointer to host adapter structure
1015 *
1016 * The ISR processes done list entries and performs level 3 error handling.
1017 */
1018irqreturn_t asd_hw_isr(int irq, void *dev_id)
1019{
1020        struct asd_ha_struct *asd_ha = dev_id;
1021        u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT);
1022
1023        if (!chimint)
1024                return IRQ_NONE;
1025
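            /* Write the pending sources back to clear them; the read
             * flushes the posted write. */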
1026        asd_write_reg_dword(asd_ha, CHIMINT, chimint);
1027        (void) asd_read_reg_dword(asd_ha, CHIMINT);
1028
1029        if (chimint & DLAVAIL)
1030                asd_process_donelist_isr(asd_ha);
1031        if (chimint & COMINT)
1032                asd_com_sas_isr(asd_ha);
1033        if (chimint & DEVINT)
1034                asd_dch_sas_isr(asd_ha);
1035        if (chimint & INITERR)
1036                asd_rbi_exsi_isr(asd_ha);
1037        if (chimint & HOSTERR)
1038                asd_hst_pcix_isr(asd_ha);
1039
1040        return IRQ_HANDLED;
1041}
1042
1043/* ---------- SCB handling ---------- */
1044
1045static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
1046                                       gfp_t gfp_flags)
1047{
1048        extern struct kmem_cache *asd_ascb_cache;
1049        struct asd_seq_data *seq = &asd_ha->seq;
1050        struct asd_ascb *ascb;
1051        unsigned long flags;
1052
1053        ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags);
1054
1055        if (ascb) {
1056                ascb->dma_scb.size = sizeof(struct scb);
1057                ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
1058                                                     gfp_flags,
1059                                                    &ascb->dma_scb.dma_handle);
1060                if (!ascb->dma_scb.vaddr) {
1061                        kmem_cache_free(asd_ascb_cache, ascb);
1062                        return NULL;
1063                }
1064                memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
1065                asd_init_ascb(asd_ha, ascb);
1066
1067                spin_lock_irqsave(&seq->tc_index_lock, flags);
1068                ascb->tc_index = asd_tc_index_get(seq, ascb);
1069                spin_unlock_irqrestore(&seq->tc_index_lock, flags);
1070                if (ascb->tc_index == -1)
1071                        goto undo;
1072
1073                ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index);
1074        }
1075
1076        return ascb;
1077undo:
1078        dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
1079                      ascb->dma_scb.dma_handle);
1080        kmem_cache_free(asd_ascb_cache, ascb);
1081        ASD_DPRINTK("no index for ascb\n");
1082        return NULL;
1083}
1084
1085/**
1086 * asd_ascb_alloc_list -- allocate a list of aSCBs
1087 * @asd_ha: pointer to host adapter structure
1088 * @num: pointer to integer number of aSCBs
1089 * @gfp_flags: GFP_ flags.
1090 *
1091 * This is the only function which is used to allocate aSCBs.
1092 * It can allocate one or many. If more than one, then they form
1093 * a linked list in two ways: by their list field of the ascb struct
1094 * and by the next_scb field of the scb_header.
1095 *
1096 * Returns NULL if no memory was available, else pointer to a list
1097 * of ascbs.  When this function returns, @num is the number of
1098 * SCBs which could not be allocated, or 0 if all of the requested
1099 * SCBs were allocated.
1100 */
1101struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
1102                                     *asd_ha, int *num,
1103                                     gfp_t gfp_flags)
1104{
1105        struct asd_ascb *first = NULL;
1106
1107        for ( ; *num > 0; --*num) {
1108                struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags);
1109
1110                if (!ascb)
1111                        break;
1112                else if (!first)
1113                        first = ascb;
1114                else {
1115                        struct asd_ascb *last = list_entry(first->list.prev,
1116                                                           struct asd_ascb,
1117                                                           list);
1118                        list_add_tail(&ascb->list, &first->list);
1119                        last->scb->header.next_scb =
1120                                cpu_to_le64(((u64)ascb->dma_scb.dma_handle));
1121                }
1122        }
1123
1124        return first;
1125}
1126
1127/**
1128 * asd_swap_head_scb -- swap the head scb
1129 * @asd_ha: pointer to host adapter structure
1130 * @ascb: pointer to the head of an ascb list
1131 *
1132 * The sequencer knows the DMA address of the next SCB to be DMAed to
1133 * the host adapter, from initialization or from the last list DMAed.
1134 * seq->next_scb keeps the address of this SCB.  The sequencer will
1135 * DMA to the host adapter this list of SCBs.  But the head (first
1136 * element) of this list is not known to the sequencer.  Here we swap
1137 * the head of the list with the known SCB (memcpy()).
1138 * Only one memcpy() is required per list, so it is in our interest
1139 * to keep the list of SCBs as long as possible, so that the ratio
1140 * of memcpy() calls to the number of SCBs DMA-ed is as small
1141 * as possible.
1142 *
1143 * LOCKING: called with the pending list lock held.
1144 */
1145static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
1146                              struct asd_ascb *ascb)
1147{
1148        struct asd_seq_data *seq = &asd_ha->seq;
1149        struct asd_ascb *last = list_entry(ascb->list.prev,
1150                                           struct asd_ascb,
1151                                           list);
1152        struct asd_dma_tok t = ascb->dma_scb;
1153
1154        memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb));
1155        ascb->dma_scb = seq->next_scb;
1156        ascb->scb = ascb->dma_scb.vaddr;
1157        seq->next_scb = t;
1158        last->scb->header.next_scb =
1159                cpu_to_le64(((u64)seq->next_scb.dma_handle));
1160}
1161
1162/**
1163 * asd_start_scb_timers -- (add and) start timers of SCBs
1164 * @list: pointer to struct list_head of the scbs
1165 * @to: timeout in jiffies
1166 *
1167 * If an SCB in the @list has no timer function, assign the default
1168 * one, then start the timer of the SCB.  This function is
1169 * intended to be called from asd_post_ascb_list(), just prior to
1170 * posting the SCBs to the sequencer.
1171 */
1172static void asd_start_scb_timers(struct list_head *list)
1173{
1174        struct asd_ascb *ascb;
1175        list_for_each_entry(ascb, list, list) {
1176                if (!ascb->uldd_timer) {
1177                        ascb->timer.data = (unsigned long) ascb;
1178                        ascb->timer.function = asd_ascb_timedout;
1179                        ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
1180                        add_timer(&ascb->timer);
1181                }
1182        }
1183}
1184
1185/**
1186 * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter
1187 * @asd_ha: pointer to a host adapter structure
1188 * @ascb: pointer to the first aSCB in the list
1189 * @num: number of aSCBs in the list (to be posted)
1190 *
1191 * See queueing comment in asd_post_escb_list().
1192 *
1193 * Additional note on queuing: In order to minimize the ratio of memcpy()
1194 * to the number of ascbs sent, we try to batch-send as many ascbs as possible
1195 * in one go.
1196 * Two cases are possible:
1197 *    A) can_queue >= num,
1198 *    B) can_queue < num.
1199 * Case A: we can send the whole batch at once.  "pending" is
1200 * incremented at the beginning of this function, where it is checked,
1201 * to eliminate races when this function is called by multiple processes.
1202 * Case B: should never happen if the managing layer considers
1203 * lldd_queue_size.
1204 */
1205int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1206                       int num)
1207{
1208        unsigned long flags;
1209        LIST_HEAD(list);
1210        int can_queue;
1211
1212        spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
1213        can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending;
1214        if (can_queue >= num)
1215                asd_ha->seq.pending += num;
1216        else
1217                can_queue = 0;
1218
1219        if (!can_queue) {
1220                spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1221                asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev));
1222                return -SAS_QUEUE_FULL;
1223        }
1224
1225        asd_swap_head_scb(asd_ha, ascb);
1226
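            /* Thread the local list head into the circular ascb chain so
             * the whole batch can be timed and spliced onto pend_q. */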
1227        __list_add(&list, ascb->list.prev, &ascb->list);
1228
1229        asd_start_scb_timers(&list);
1230
1231        asd_ha->seq.scbpro += num;
1232        list_splice_init(&list, asd_ha->seq.pend_q.prev);
1233        asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
1234        spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1235
1236        return 0;
1237}
1238
1239/**
1240 * asd_post_escb_list -- post a list of 1 or more empty scb
1241 * @asd_ha: pointer to a host adapter structure
1242 * @ascb: pointer to the first empty SCB in the list
1243 * @num: number of aSCBs in the list (to be posted)
1244 *
1245 * This is essentially the same as asd_post_ascb_list, but we do not
1246 * increment pending, add those to the pending list or get indexes.
1247 * See asd_init_escbs() and asd_init_post_escbs().
1248 *
1249 * Since sending a list of ascbs is a superset of sending a single
1250 * ascb, this function exists to generalize this.  More specifically,
1251 * when sending a list of those, we want to do only a _single_
1252 * memcpy() at swap head, as opposed to for each ascb sent (in the
1253 * case of sending them one by one).  That is, we want to minimize the
1254 * ratio of memcpy() operations to the number of ascbs sent.  The same
1255 * logic applies to asd_post_ascb_list().
1256 */
1257int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1258                       int num)
1259{
1260        unsigned long flags;
1261
1262        spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
1263        asd_swap_head_scb(asd_ha, ascb);
1264        asd_ha->seq.scbpro += num;
1265        asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
1266        spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1267
1268        return 0;
1269}
1270
1271/* ---------- LED ---------- */
1272
1273/**
1274 * asd_turn_led -- turn on/off an LED
1275 * @asd_ha: pointer to host adapter structure
1276 * @phy_id: the PHY id whose LED we want to manipulate
1277 * @op: 1 to turn on, 0 to turn off
1278 */
1279void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1280{
1281        if (phy_id < ASD_MAX_PHYS) {
1282                u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
1283                if (op)
1284                        v |= LEDPOL;
1285                else
1286                        v &= ~LEDPOL;
1287                asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
1288        }
1289}
1290
1291/**
1292 * asd_control_led -- enable/disable an LED on the board
1293 * @asd_ha: pointer to host adapter structure
1294 * @phy_id: integer, the phy id
1295 * @op: integer, 1 to enable, 0 to disable the LED
1296 *
1297 * First we output-enable the LED, then we set the source
1298 * to be an external module.
1299 */
1300void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1301{
1302        if (phy_id < ASD_MAX_PHYS) {
1303                u32 v;
1304
1305                v = asd_read_reg_dword(asd_ha, GPIOOER);
1306                if (op)
1307                        v |= (1 << phy_id);
1308                else
1309                        v &= ~(1 << phy_id);
1310                asd_write_reg_dword(asd_ha, GPIOOER, v);
1311
1312                v = asd_read_reg_dword(asd_ha, GPIOCNFGR);
1313                if (op)
1314                        v |= (1 << phy_id);
1315                else
1316                        v &= ~(1 << phy_id);
1317                asd_write_reg_dword(asd_ha, GPIOCNFGR, v);
1318        }
1319}
1320
1321/* ---------- PHY enable ---------- */
1322
1323static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
1324{
1325        struct asd_phy *phy = &asd_ha->phys[phy_id];
1326
1327        asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0);
1328        asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY),
1329                           HOTPLUG_DELAY_TIMEOUT);
1330
1331        /* Get defaults from manuf. sector */
1332        /* XXX we need defaults for those in case MS is broken. */
1333        asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0),
1334                           phy->phy_desc->phy_control_0);
1335        asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1),
1336                           phy->phy_desc->phy_control_1);
1337        asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2),
1338                           phy->phy_desc->phy_control_2);
1339        asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3),
1340                           phy->phy_desc->phy_control_3);
1341
1342        asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id),
1343                            ASD_COMINIT_TIMEOUT);
1344
1345        asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id),
1346                           phy->id_frm_tok->dma_handle);
1347
1348        asd_control_led(asd_ha, phy_id, 1);
1349
1350        return 0;
1351}
1352
1353int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
1354{
1355        u8  phy_m;
1356        u8  i;
1357        int num = 0, k;
1358        struct asd_ascb *ascb;
1359        struct asd_ascb *ascb_list;
1360
1361        if (!phy_mask) {
1362                asd_printk("%s called with phy_mask of 0!?\n", __func__);
1363                return 0;
1364        }
1365
1366        for_each_phy(phy_mask, phy_m, i) {
1367                num++;
1368                asd_enable_phy(asd_ha, i);
1369        }
1370
1371        k = num;
1372        ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL);
1373        if (!ascb_list) {
1374                asd_printk("no memory for control phy ascb list\n");
1375                return -ENOMEM;
1376        }
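            /* asd_ascb_alloc_list() left the shortfall in k; subtract it so
             * num is the number of ascbs actually allocated. */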
1377        num -= k;
1378
1379        ascb = ascb_list;
1380        for_each_phy(phy_mask, phy_m, i) {
1381                asd_build_control_phy(ascb, i, ENABLE_PHY);
1382                ascb = list_entry(ascb->list.next, struct asd_ascb, list);
1383        }
1384        ASD_DPRINTK("posting %d control phy scbs\n", num);
1385        k = asd_post_ascb_list(asd_ha, ascb_list, num);
1386        if (k)
1387                asd_ascb_free_list(ascb_list);
1388
1389        return k;
1390}
1391