linux/drivers/scsi/hisi_sas/hisi_sas_main.c
   1/*
   2 * Copyright (c) 2015 Linaro Ltd.
   3 * Copyright (c) 2015 Hisilicon Limited.
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; either version 2 of the License, or
   8 * (at your option) any later version.
   9 *
  10 */
  11
  12#include "hisi_sas.h"
  13#define DRV_NAME "hisi_sas"
  14
  15#define DEV_IS_GONE(dev) \
  16        ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
  17
  18static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
  19                                u8 *lun, struct hisi_sas_tmf_task *tmf);
  20
  21static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
  22{
  23        return device->port->ha->lldd_ha;
  24}
  25
  26static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
  27{
  28        void *bitmap = hisi_hba->slot_index_tags;
  29
  30        clear_bit(slot_idx, bitmap);
  31}
  32
  33static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
  34{
  35        hisi_sas_slot_index_clear(hisi_hba, slot_idx);
  36}
  37
  38static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
  39{
  40        void *bitmap = hisi_hba->slot_index_tags;
  41
  42        set_bit(slot_idx, bitmap);
  43}
  44
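/*
 * Allocate a free command slot index (tag) from the hba-wide bitmap.
 * Callers in this file invoke it under hisi_hba->lock; returns
 * -SAS_QUEUE_FULL when every slot is in use.
 */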
  45static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
  46{
  47        unsigned int index;
  48        void *bitmap = hisi_hba->slot_index_tags;
  49
  50        index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
  51        if (index >= hisi_hba->slot_index_count)
  52                return -SAS_QUEUE_FULL;
  53        hisi_sas_slot_index_set(hisi_hba, index);
  54        *slot_idx = index;
  55        return 0;
  56}
  57
  58static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
  59{
  60        int i;
  61
  62        for (i = 0; i < hisi_hba->slot_index_count; ++i)
  63                hisi_sas_slot_index_clear(hisi_hba, i);
  64}
  65
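/*
 * Undo everything hisi_sas_task_prep() set up for a slot: unmap the
 * scatterlist (non-ATA tasks only), return the command table, status
 * buffer and SGE page to their DMA pools, drop the slot from its port
 * list and release the slot index back to the tag bitmap.
 */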
  66void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
  67                             struct hisi_sas_slot *slot)
  68{
  69        struct device *dev = &hisi_hba->pdev->dev;
  70
  71        if (!slot->task)
  72                return;
  73
  74        if (!sas_protocol_ata(task->task_proto))
  75                if (slot->n_elem)
  76                        dma_unmap_sg(dev, task->scatter, slot->n_elem,
  77                                     task->data_dir);
  78
  79        if (slot->command_table)
  80                dma_pool_free(hisi_hba->command_table_pool,
  81                              slot->command_table, slot->command_table_dma);
  82
  83        if (slot->status_buffer)
  84                dma_pool_free(hisi_hba->status_buffer_pool,
  85                              slot->status_buffer, slot->status_buffer_dma);
  86
  87        if (slot->sge_page)
  88                dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
  89                              slot->sge_page_dma);
  90
  91        list_del_init(&slot->entry);
  92        task->lldd_task = NULL;
  93        slot->task = NULL;
  94        slot->port = NULL;
  95        hisi_sas_slot_index_free(hisi_hba, slot->idx);
  96        memset(slot, 0, sizeof(*slot));
  97}
  98EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
  99
 100static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
 101                                  struct hisi_sas_slot *slot)
 102{
 103        return hisi_hba->hw->prep_smp(hisi_hba, slot);
 104}
 105
 106static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
 107                                  struct hisi_sas_slot *slot, int is_tmf,
 108                                  struct hisi_sas_tmf_task *tmf)
 109{
 110        return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
 111}
 112
 113static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
 114                                  struct hisi_sas_slot *slot)
 115{
 116        return hisi_hba->hw->prep_stp(hisi_hba, slot);
 117}
 118
 119/*
  120 * Issue an abort TMF for the slot's task, regardless of whether the
  121 * task is still present in the sdev, then free the slot and run the
  122 * normal task completion callback.
 123 */
 124static void hisi_sas_slot_abort(struct work_struct *work)
 125{
 126        struct hisi_sas_slot *abort_slot =
 127                container_of(work, struct hisi_sas_slot, abort_slot);
 128        struct sas_task *task = abort_slot->task;
 129        struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
 130        struct scsi_cmnd *cmnd = task->uldd_task;
 131        struct hisi_sas_tmf_task tmf_task;
 132        struct domain_device *device = task->dev;
 133        struct hisi_sas_device *sas_dev = device->lldd_dev;
 134        struct scsi_lun lun;
 135        struct device *dev = &hisi_hba->pdev->dev;
 136        int tag = abort_slot->idx;
 137
 138        if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
 139                dev_err(dev, "cannot abort slot for non-ssp task\n");
 140                goto out;
 141        }
 142
 143        int_to_scsilun(cmnd->device->lun, &lun);
 144        tmf_task.tmf = TMF_ABORT_TASK;
 145        tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
 146
 147        hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
 148out:
 149        /* Do cleanup for this task */
 150        hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
 151        if (task->task_done)
 152                task->task_done(task);
 153        if (sas_dev && sas_dev->running_req)
 154                sas_dev->running_req--;
 155}
 156
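/*
 * Build a command slot for @task: validate the port and device state,
 * DMA-map the scatterlist for non-ATA commands, allocate a slot index
 * and a delivery queue entry, carve the status buffer and command
 * table out of the DMA pools, and hand the slot to the hw-specific
 * prep routine for the task's protocol. On success *pass is
 * incremented so the caller knows to start delivery. Called with
 * hisi_hba->lock held by hisi_sas_task_exec().
 */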
 157static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
 158                              int is_tmf, struct hisi_sas_tmf_task *tmf,
 159                              int *pass)
 160{
 161        struct domain_device *device = task->dev;
 162        struct hisi_sas_device *sas_dev = device->lldd_dev;
 163        struct hisi_sas_port *port;
 164        struct hisi_sas_slot *slot;
 165        struct hisi_sas_cmd_hdr *cmd_hdr_base;
 166        struct device *dev = &hisi_hba->pdev->dev;
 167        int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
 168
 169        if (!device->port) {
 170                struct task_status_struct *ts = &task->task_status;
 171
 172                ts->resp = SAS_TASK_UNDELIVERED;
 173                ts->stat = SAS_PHY_DOWN;
 174                /*
  175                 * libsas will still use dev->port for SATA devices,
  176                 * so do not call task_done() for them here
 177                 */
 178                if (device->dev_type != SAS_SATA_DEV)
 179                        task->task_done(task);
 180                return 0;
 181        }
 182
 183        if (DEV_IS_GONE(sas_dev)) {
 184                if (sas_dev)
 185                        dev_info(dev, "task prep: device %llu not ready\n",
 186                                 sas_dev->device_id);
 187                else
 188                        dev_info(dev, "task prep: device %016llx not ready\n",
 189                                 SAS_ADDR(device->sas_addr));
 190
 191                rc = SAS_PHY_DOWN;
 192                return rc;
 193        }
 194        port = device->port->lldd_port;
 195        if (port && !port->port_attached && !tmf) {
 196                if (sas_protocol_ata(task->task_proto)) {
 197                        struct task_status_struct *ts = &task->task_status;
 198
 199                        dev_info(dev,
 200                                 "task prep: SATA/STP port%d not attach device\n",
 201                                 device->port->id);
 202                        ts->resp = SAS_TASK_COMPLETE;
 203                        ts->stat = SAS_PHY_DOWN;
 204                        task->task_done(task);
 205                } else {
 206                        struct task_status_struct *ts = &task->task_status;
 207
 208                        dev_info(dev,
 209                                 "task prep: SAS port%d does not attach device\n",
 210                                 device->port->id);
 211                        ts->resp = SAS_TASK_UNDELIVERED;
 212                        ts->stat = SAS_PHY_DOWN;
 213                        task->task_done(task);
 214                }
 215                return 0;
 216        }
 217
 218        if (!sas_protocol_ata(task->task_proto)) {
 219                if (task->num_scatter) {
 220                        n_elem = dma_map_sg(dev, task->scatter,
 221                                            task->num_scatter, task->data_dir);
 222                        if (!n_elem) {
 223                                rc = -ENOMEM;
 224                                goto prep_out;
 225                        }
 226                }
 227        } else
 228                n_elem = task->num_scatter;
 229
 230        rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
 231        if (rc)
 232                goto err_out;
 233        rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
 234                                         &dlvry_queue_slot);
 235        if (rc)
 236                goto err_out_tag;
 237
 238        slot = &hisi_hba->slot_info[slot_idx];
 239        memset(slot, 0, sizeof(struct hisi_sas_slot));
 240
 241        slot->idx = slot_idx;
 242        slot->n_elem = n_elem;
 243        slot->dlvry_queue = dlvry_queue;
 244        slot->dlvry_queue_slot = dlvry_queue_slot;
 245        cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
 246        slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
 247        slot->task = task;
 248        slot->port = port;
 249        task->lldd_task = slot;
 250        INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
 251
 252        slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
 253                                             GFP_ATOMIC,
 254                                             &slot->status_buffer_dma);
 255        if (!slot->status_buffer) {
 256                rc = -ENOMEM;
 257                goto err_out_slot_buf;
 258        }
 259        memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);
 260
 261        slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
 262                                             GFP_ATOMIC,
 263                                             &slot->command_table_dma);
 264        if (!slot->command_table) {
 265                rc = -ENOMEM;
 266                goto err_out_status_buf;
 267        }
 268        memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
 269        memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
 270
 271        switch (task->task_proto) {
 272        case SAS_PROTOCOL_SMP:
 273                rc = hisi_sas_task_prep_smp(hisi_hba, slot);
 274                break;
 275        case SAS_PROTOCOL_SSP:
 276                rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
 277                break;
 278        case SAS_PROTOCOL_SATA:
 279        case SAS_PROTOCOL_STP:
 280        case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
 281                rc = hisi_sas_task_prep_ata(hisi_hba, slot);
 282                break;
 283        default:
 284                dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
 285                        task->task_proto);
 286                rc = -EINVAL;
 287                break;
 288        }
 289
 290        if (rc) {
 291                dev_err(dev, "task prep: rc = 0x%x\n", rc);
 292                if (slot->sge_page)
 293                        goto err_out_sge;
 294                goto err_out_command_table;
 295        }
 296
 297        list_add_tail(&slot->entry, &port->list);
 298        spin_lock(&task->task_state_lock);
 299        task->task_state_flags |= SAS_TASK_AT_INITIATOR;
 300        spin_unlock(&task->task_state_lock);
 301
 302        hisi_hba->slot_prep = slot;
 303
 304        sas_dev->running_req++;
 305        ++(*pass);
 306
 307        return 0;
 308
 309err_out_sge:
 310        dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
 311                slot->sge_page_dma);
 312err_out_command_table:
 313        dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
 314                slot->command_table_dma);
 315err_out_status_buf:
 316        dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
 317                slot->status_buffer_dma);
 318err_out_slot_buf:
 319        /* Nothing to be done */
 320err_out_tag:
 321        hisi_sas_slot_index_free(hisi_hba, slot_idx);
 322err_out:
 323        dev_err(dev, "task prep: failed[%d]!\n", rc);
 324        if (!sas_protocol_ata(task->task_proto))
 325                if (n_elem)
 326                        dma_unmap_sg(dev, task->scatter, n_elem,
 327                                     task->data_dir);
 328prep_out:
 329        return rc;
 330}
 331
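/*
 * Queue a single task: prepare the slot and, if anything was prepared,
 * tell the hardware to start delivery. The hba lock is held across
 * both steps so slot preparation and doorbell ordering stay atomic.
 */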
 332static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
 333                              int is_tmf, struct hisi_sas_tmf_task *tmf)
 334{
 335        u32 rc;
 336        u32 pass = 0;
 337        unsigned long flags;
 338        struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
 339        struct device *dev = &hisi_hba->pdev->dev;
 340
 341        /* protect task_prep and start_delivery sequence */
 342        spin_lock_irqsave(&hisi_hba->lock, flags);
 343        rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
 344        if (rc)
 345                dev_err(dev, "task exec: failed[%d]!\n", rc);
 346
 347        if (likely(pass))
 348                hisi_hba->hw->start_delivery(hisi_hba);
 349        spin_unlock_irqrestore(&hisi_hba->lock, flags);
 350
 351        return rc;
 352}
 353
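/*
 * Report a completed OOB sequence on @phy_no to libsas: publish the
 * negotiated link rates, copy out the received identify frame (SAS
 * phys only) and raise the PHYE_OOB_DONE and PORTE_BYTES_DMAED events.
 */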
 354static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
 355{
 356        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 357        struct asd_sas_phy *sas_phy = &phy->sas_phy;
 358        struct sas_ha_struct *sas_ha;
 359
 360        if (!phy->phy_attached)
 361                return;
 362
 363        sas_ha = &hisi_hba->sha;
 364        sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
 365
 366        if (sas_phy->phy) {
 367                struct sas_phy *sphy = sas_phy->phy;
 368
 369                sphy->negotiated_linkrate = sas_phy->linkrate;
 370                sphy->minimum_linkrate = phy->minimum_linkrate;
 371                sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
 372                sphy->maximum_linkrate = phy->maximum_linkrate;
 373        }
 374
 375        if (phy->phy_type & PORT_TYPE_SAS) {
 376                struct sas_identify_frame *id;
 377
 378                id = (struct sas_identify_frame *)phy->frame_rcvd;
 379                id->dev_type = phy->identify.device_type;
 380                id->initiator_bits = SAS_PROTOCOL_ALL;
 381                id->target_bits = phy->identify.target_port_protocols;
 382        } else if (phy->phy_type & PORT_TYPE_SATA) {
  383                /* Nothing */
 384        }
 385
 386        sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
 387        sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
 388}
 389
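/*
 * Claim the first unused entry in hisi_hba->devices[] for @device and
 * initialise it; returns NULL if all HISI_SAS_MAX_DEVICES entries are
 * already in use.
 */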
 390static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
 391{
 392        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 393        struct hisi_sas_device *sas_dev = NULL;
 394        int i;
 395
 396        spin_lock(&hisi_hba->lock);
 397        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
 398                if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
 399                        hisi_hba->devices[i].device_id = i;
 400                        sas_dev = &hisi_hba->devices[i];
 401                        sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
 402                        sas_dev->dev_type = device->dev_type;
 403                        sas_dev->hisi_hba = hisi_hba;
 404                        sas_dev->sas_device = device;
 405                        break;
 406                }
 407        }
 408        spin_unlock(&hisi_hba->lock);
 409
 410        return sas_dev;
 411}
 412
 413static int hisi_sas_dev_found(struct domain_device *device)
 414{
 415        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 416        struct domain_device *parent_dev = device->parent;
 417        struct hisi_sas_device *sas_dev;
 418        struct device *dev = &hisi_hba->pdev->dev;
 419
 420        sas_dev = hisi_sas_alloc_dev(device);
 421        if (!sas_dev) {
 422                dev_err(dev, "fail alloc dev: max support %d devices\n",
 423                        HISI_SAS_MAX_DEVICES);
 424                return -EINVAL;
 425        }
 426
 427        device->lldd_dev = sas_dev;
 428        hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
 429
 430        if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
 431                int phy_no;
 432                u8 phy_num = parent_dev->ex_dev.num_phys;
 433                struct ex_phy *phy;
 434
 435                for (phy_no = 0; phy_no < phy_num; phy_no++) {
 436                        phy = &parent_dev->ex_dev.ex_phy[phy_no];
 437                        if (SAS_ADDR(phy->attached_sas_addr) ==
 438                                SAS_ADDR(device->sas_addr)) {
 439                                sas_dev->attached_phy = phy_no;
 440                                break;
 441                        }
 442                }
 443
 444                if (phy_no == phy_num) {
 445                        dev_info(dev, "dev found: no attached "
 446                                 "dev:%016llx at ex:%016llx\n",
 447                                 SAS_ADDR(device->sas_addr),
 448                                 SAS_ADDR(parent_dev->sas_addr));
 449                        return -EINVAL;
 450                }
 451        }
 452
 453        return 0;
 454}
 455
 456static int hisi_sas_slave_configure(struct scsi_device *sdev)
 457{
 458        struct domain_device *dev = sdev_to_domain_dev(sdev);
 459        int ret = sas_slave_configure(sdev);
 460
 461        if (ret)
 462                return ret;
 463        if (!dev_is_sata(dev))
 464                sas_change_queue_depth(sdev, 64);
 465
 466        return 0;
 467}
 468
 469static void hisi_sas_scan_start(struct Scsi_Host *shost)
 470{
 471        struct hisi_hba *hisi_hba = shost_priv(shost);
 472        int i;
 473
 474        for (i = 0; i < hisi_hba->n_phy; ++i)
 475                hisi_sas_bytes_dmaed(hisi_hba, i);
 476
 477        hisi_hba->scan_finished = 1;
 478}
 479
 480static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
 481{
 482        struct hisi_hba *hisi_hba = shost_priv(shost);
 483        struct sas_ha_struct *sha = &hisi_hba->sha;
 484
 485        if (hisi_hba->scan_finished == 0)
 486                return 0;
 487
 488        sas_drain_work(sha);
 489        return 1;
 490}
 491
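/*
 * Phy-up bottom half, run from the driver workqueue because the
 * hw-specific sl_notify() step needs to sleep before the new phy can
 * be reported to libsas via hisi_sas_bytes_dmaed().
 */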
 492static void hisi_sas_phyup_work(struct work_struct *work)
 493{
 494        struct hisi_sas_phy *phy =
 495                container_of(work, struct hisi_sas_phy, phyup_ws);
 496        struct hisi_hba *hisi_hba = phy->hisi_hba;
 497        struct asd_sas_phy *sas_phy = &phy->sas_phy;
 498        int phy_no = sas_phy->id;
 499
 500        hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
 501        hisi_sas_bytes_dmaed(hisi_hba, phy_no);
 502}
 503
 504static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
 505{
 506        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 507        struct asd_sas_phy *sas_phy = &phy->sas_phy;
 508
 509        phy->hisi_hba = hisi_hba;
 510        phy->port = NULL;
 511        init_timer(&phy->timer);
 512        sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
 513        sas_phy->class = SAS;
 514        sas_phy->iproto = SAS_PROTOCOL_ALL;
 515        sas_phy->tproto = 0;
 516        sas_phy->type = PHY_TYPE_PHYSICAL;
 517        sas_phy->role = PHY_ROLE_INITIATOR;
 518        sas_phy->oob_mode = OOB_NOT_CONNECTED;
 519        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
 520        sas_phy->id = phy_no;
 521        sas_phy->sas_addr = &hisi_hba->sas_addr[0];
 522        sas_phy->frame_rcvd = &phy->frame_rcvd[0];
 523        sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
 524        sas_phy->lldd_phy = phy;
 525
 526        INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
 527}
 528
 529static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
 530{
 531        struct sas_ha_struct *sas_ha = sas_phy->ha;
 532        struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
 533        struct hisi_sas_phy *phy = sas_phy->lldd_phy;
 534        struct asd_sas_port *sas_port = sas_phy->port;
 535        struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
 536        unsigned long flags;
 537
 538        if (!sas_port)
 539                return;
 540
 541        spin_lock_irqsave(&hisi_hba->lock, flags);
 542        port->port_attached = 1;
 543        port->id = phy->port_id;
 544        phy->port = port;
 545        sas_port->lldd_port = port;
 546        spin_unlock_irqrestore(&hisi_hba->lock, flags);
 547}
 548
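/*
 * Force-complete every slot queued on the port behind @phy_no; when
 * @device is non-NULL only that device's slots are completed. Used
 * when a port deforms or a device is reset.
 */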
 549static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, int phy_no,
 550                                     struct domain_device *device)
 551{
 552        struct hisi_sas_phy *phy;
 553        struct hisi_sas_port *port;
 554        struct hisi_sas_slot *slot, *slot2;
 555        struct device *dev = &hisi_hba->pdev->dev;
 556
 557        phy = &hisi_hba->phy[phy_no];
 558        port = phy->port;
 559        if (!port)
 560                return;
 561
 562        list_for_each_entry_safe(slot, slot2, &port->list, entry) {
 563                struct sas_task *task;
 564
 565                task = slot->task;
 566                if (device && task->dev != device)
 567                        continue;
 568
 569                dev_info(dev, "Release slot [%d:%d], task [%p]:\n",
 570                         slot->dlvry_queue, slot->dlvry_queue_slot, task);
 571                hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
 572        }
 573}
 574
 575static void hisi_sas_port_notify_deformed(struct asd_sas_phy *sas_phy)
 576{
 577        struct domain_device *device;
 578        struct hisi_sas_phy *phy = sas_phy->lldd_phy;
 579        struct asd_sas_port *sas_port = sas_phy->port;
 580
 581        list_for_each_entry(device, &sas_port->dev_list, dev_list_node)
 582                hisi_sas_do_release_task(phy->hisi_hba, sas_phy->id, device);
 583}
 584
 585static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
 586                        struct domain_device *device)
 587{
 588        struct asd_sas_port *port = device->port;
 589        struct asd_sas_phy *sas_phy;
 590
 591        list_for_each_entry(sas_phy, &port->phy_list, port_phy_el)
 592                hisi_sas_do_release_task(hisi_hba, sas_phy->id, device);
 593}
 594
 595static void hisi_sas_dev_gone(struct domain_device *device)
 596{
 597        struct hisi_sas_device *sas_dev = device->lldd_dev;
 598        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 599        struct device *dev = &hisi_hba->pdev->dev;
 600        u64 dev_id = sas_dev->device_id;
 601
 602        dev_info(dev, "found dev[%lld:%x] is gone\n",
 603                 sas_dev->device_id, sas_dev->dev_type);
 604
 605        hisi_hba->hw->free_device(hisi_hba, sas_dev);
 606        device->lldd_dev = NULL;
 607        memset(sas_dev, 0, sizeof(*sas_dev));
 608        sas_dev->device_id = dev_id;
 609        sas_dev->dev_type = SAS_PHY_UNUSED;
 610        sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
 611}
 612
 613static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
 614{
 615        return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
 616}
 617
 618static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
 619                                void *funcdata)
 620{
 621        struct sas_ha_struct *sas_ha = sas_phy->ha;
 622        struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
 623        int phy_no = sas_phy->id;
 624
 625        switch (func) {
 626        case PHY_FUNC_HARD_RESET:
 627                hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
 628                break;
 629
 630        case PHY_FUNC_LINK_RESET:
 631                hisi_hba->hw->phy_enable(hisi_hba, phy_no);
 632                hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
 633                break;
 634
 635        case PHY_FUNC_DISABLE:
 636                hisi_hba->hw->phy_disable(hisi_hba, phy_no);
 637                break;
 638
 639        case PHY_FUNC_SET_LINK_RATE:
 640        case PHY_FUNC_RELEASE_SPINUP_HOLD:
 641        default:
 642                return -EOPNOTSUPP;
 643        }
 644        return 0;
 645}
 646
 647static void hisi_sas_task_done(struct sas_task *task)
 648{
 649        if (!del_timer(&task->slow_task->timer))
 650                return;
 651        complete(&task->slow_task->completion);
 652}
 653
 654static void hisi_sas_tmf_timedout(unsigned long data)
 655{
 656        struct sas_task *task = (struct sas_task *)data;
 657
 658        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 659        complete(&task->slow_task->completion);
 660}
 661
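/*
 * Issue a task management function synchronously: allocate a slow
 * task, arm a TASK_TIMEOUT-second timer, send it through the normal
 * delivery path with the tmf descriptor attached, then sleep on the
 * completion. The request is retried up to TASK_RETRY times unless it
 * times out, and underrun/overrun responses are mapped to a residual
 * byte count or -EMSGSIZE respectively.
 */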
 662#define TASK_TIMEOUT 20
 663#define TASK_RETRY 3
 664static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
 665                                           void *parameter, u32 para_len,
 666                                           struct hisi_sas_tmf_task *tmf)
 667{
 668        struct hisi_sas_device *sas_dev = device->lldd_dev;
 669        struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
 670        struct device *dev = &hisi_hba->pdev->dev;
 671        struct sas_task *task;
 672        int res, retry;
 673
 674        for (retry = 0; retry < TASK_RETRY; retry++) {
 675                task = sas_alloc_slow_task(GFP_KERNEL);
 676                if (!task)
 677                        return -ENOMEM;
 678
 679                task->dev = device;
 680                task->task_proto = device->tproto;
 681
 682                memcpy(&task->ssp_task, parameter, para_len);
 683                task->task_done = hisi_sas_task_done;
 684
 685                task->slow_task->timer.data = (unsigned long) task;
 686                task->slow_task->timer.function = hisi_sas_tmf_timedout;
 687                task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
 688                add_timer(&task->slow_task->timer);
 689
 690                res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
 691
 692                if (res) {
 693                        del_timer(&task->slow_task->timer);
 694                        dev_err(dev, "abort tmf: executing internal task failed: %d\n",
 695                                res);
 696                        goto ex_err;
 697                }
 698
 699                wait_for_completion(&task->slow_task->completion);
 700                res = TMF_RESP_FUNC_FAILED;
  701                /* If the TMF timed out, return immediately; do not retry */
 702                if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
 703                        if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 704                                dev_err(dev, "abort tmf: TMF task[%d] timeout\n",
 705                                        tmf->tag_of_task_to_be_managed);
 706                                if (task->lldd_task) {
 707                                        struct hisi_sas_slot *slot =
 708                                                task->lldd_task;
 709
 710                                        hisi_sas_slot_task_free(hisi_hba,
 711                                                                task, slot);
 712                                }
 713
 714                                goto ex_err;
 715                        }
 716                }
 717
 718                if (task->task_status.resp == SAS_TASK_COMPLETE &&
 719                     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
 720                        res = TMF_RESP_FUNC_COMPLETE;
 721                        break;
 722                }
 723
 724                if (task->task_status.resp == SAS_TASK_COMPLETE &&
 725                      task->task_status.stat == SAS_DATA_UNDERRUN) {
 726                        /* no error, but return the number of bytes of
 727                         * underrun
 728                         */
 729                        dev_warn(dev, "abort tmf: task to dev %016llx "
 730                                 "resp: 0x%x sts 0x%x underrun\n",
 731                                 SAS_ADDR(device->sas_addr),
 732                                 task->task_status.resp,
 733                                 task->task_status.stat);
 734                        res = task->task_status.residual;
 735                        break;
 736                }
 737
 738                if (task->task_status.resp == SAS_TASK_COMPLETE &&
 739                        task->task_status.stat == SAS_DATA_OVERRUN) {
 740                        dev_warn(dev, "abort tmf: blocked task error\n");
 741                        res = -EMSGSIZE;
 742                        break;
 743                }
 744
 745                dev_warn(dev, "abort tmf: task to dev "
 746                         "%016llx resp: 0x%x status 0x%x\n",
 747                         SAS_ADDR(device->sas_addr), task->task_status.resp,
 748                         task->task_status.stat);
 749                sas_free_task(task);
 750                task = NULL;
 751        }
 752ex_err:
 753        WARN_ON(retry == TASK_RETRY);
 754        sas_free_task(task);
 755        return res;
 756}
 757
 758static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
 759                                u8 *lun, struct hisi_sas_tmf_task *tmf)
 760{
 761        struct sas_ssp_task ssp_task;
 762
 763        if (!(device->tproto & SAS_PROTOCOL_SSP))
 764                return TMF_RESP_FUNC_ESUPP;
 765
 766        memcpy(ssp_task.LUN, lun, 8);
 767
 768        return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
 769                                sizeof(ssp_task), tmf);
 770}
 771
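/*
 * libsas ->lldd_abort_task handler. For SSP tasks an ABORT TASK TMF is
 * sent for the slot's tag and, if it completes, the slot itself is
 * completed under the hba lock. SATA/STP tasks on a directly attached
 * disk are simply marked aborted and reported as complete.
 */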
 772static int hisi_sas_abort_task(struct sas_task *task)
 773{
 774        struct scsi_lun lun;
 775        struct hisi_sas_tmf_task tmf_task;
 776        struct domain_device *device = task->dev;
 777        struct hisi_sas_device *sas_dev = device->lldd_dev;
 778        struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
 779        struct device *dev = &hisi_hba->pdev->dev;
 780        int rc = TMF_RESP_FUNC_FAILED;
 781        unsigned long flags;
 782
 783        if (!sas_dev) {
 784                dev_warn(dev, "Device has been removed\n");
 785                return TMF_RESP_FUNC_FAILED;
 786        }
 787
 788        spin_lock_irqsave(&task->task_state_lock, flags);
 789        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 790                spin_unlock_irqrestore(&task->task_state_lock, flags);
 791                rc = TMF_RESP_FUNC_COMPLETE;
 792                goto out;
 793        }
 794
 795        spin_unlock_irqrestore(&task->task_state_lock, flags);
 796        sas_dev->dev_status = HISI_SAS_DEV_EH;
 797        if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
 798                struct scsi_cmnd *cmnd = task->uldd_task;
 799                struct hisi_sas_slot *slot = task->lldd_task;
 800                u32 tag = slot->idx;
 801
 802                int_to_scsilun(cmnd->device->lun, &lun);
 803                tmf_task.tmf = TMF_ABORT_TASK;
 804                tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
 805
 806                rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
 807                                                  &tmf_task);
 808
  809                /* If the abort succeeded, complete the slot and forward the callback */
 810                if (rc == TMF_RESP_FUNC_COMPLETE) {
 811                        if (task->lldd_task) {
 812                                struct hisi_sas_slot *slot;
 813
 814                                slot = &hisi_hba->slot_info
 815                                        [tmf_task.tag_of_task_to_be_managed];
 816                                spin_lock_irqsave(&hisi_hba->lock, flags);
 817                                hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
 818                                spin_unlock_irqrestore(&hisi_hba->lock, flags);
 819                        }
 820                }
 821
 822        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
 823                task->task_proto & SAS_PROTOCOL_STP) {
 824                if (task->dev->dev_type == SAS_SATA_DEV) {
 825                        struct hisi_slot_info *slot = task->lldd_task;
 826
 827                        dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
 828                                   hisi_hba, task, slot);
 829                        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 830                        rc = TMF_RESP_FUNC_COMPLETE;
 831                        goto out;
 832                }
 833
 834        }
 835
 836out:
 837        if (rc != TMF_RESP_FUNC_COMPLETE)
 838                dev_notice(dev, "abort task: rc=%d\n", rc);
 839        return rc;
 840}
 841
 842static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
 843{
 844        struct hisi_sas_tmf_task tmf_task;
 845        int rc = TMF_RESP_FUNC_FAILED;
 846
 847        tmf_task.tmf = TMF_ABORT_TASK_SET;
 848        rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
 849
 850        return rc;
 851}
 852
 853static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
 854{
 855        int rc = TMF_RESP_FUNC_FAILED;
 856        struct hisi_sas_tmf_task tmf_task;
 857
 858        tmf_task.tmf = TMF_CLEAR_ACA;
 859        rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
 860
 861        return rc;
 862}
 863
 864static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
 865{
 866        struct sas_phy *phy = sas_get_local_phy(device);
 867        int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
 868                        (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 869        rc = sas_phy_reset(phy, reset_type);
 870        sas_put_local_phy(phy);
 871        msleep(2000);
 872        return rc;
 873}
 874
 875static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
 876{
 877        struct hisi_sas_device *sas_dev = device->lldd_dev;
 878        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 879        unsigned long flags;
 880        int rc = TMF_RESP_FUNC_FAILED;
 881
 882        if (sas_dev->dev_status != HISI_SAS_DEV_EH)
 883                return TMF_RESP_FUNC_FAILED;
 884        sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
 885
 886        rc = hisi_sas_debug_I_T_nexus_reset(device);
 887
 888        spin_lock_irqsave(&hisi_hba->lock, flags);
 889        hisi_sas_release_task(hisi_hba, device);
 890        spin_unlock_irqrestore(&hisi_hba->lock, flags);
 891
 892        return 0;
 893}
 894
 895static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
 896{
 897        struct hisi_sas_tmf_task tmf_task;
 898        struct hisi_sas_device *sas_dev = device->lldd_dev;
 899        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 900        struct device *dev = &hisi_hba->pdev->dev;
 901        unsigned long flags;
 902        int rc = TMF_RESP_FUNC_FAILED;
 903
 904        tmf_task.tmf = TMF_LU_RESET;
 905        sas_dev->dev_status = HISI_SAS_DEV_EH;
 906        rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
 907        if (rc == TMF_RESP_FUNC_COMPLETE) {
 908                spin_lock_irqsave(&hisi_hba->lock, flags);
 909                hisi_sas_release_task(hisi_hba, device);
 910                spin_unlock_irqrestore(&hisi_hba->lock, flags);
 911        }
 912
  913        /* If the LU reset failed, error handling falls through to I_T nexus reset */
 914        dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
 915                sas_dev->device_id, rc);
 916        return rc;
 917}
 918
 919static int hisi_sas_query_task(struct sas_task *task)
 920{
 921        struct scsi_lun lun;
 922        struct hisi_sas_tmf_task tmf_task;
 923        int rc = TMF_RESP_FUNC_FAILED;
 924
 925        if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
 926                struct scsi_cmnd *cmnd = task->uldd_task;
 927                struct domain_device *device = task->dev;
 928                struct hisi_sas_slot *slot = task->lldd_task;
 929                u32 tag = slot->idx;
 930
 931                int_to_scsilun(cmnd->device->lun, &lun);
 932                tmf_task.tmf = TMF_QUERY_TASK;
 933                tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
 934
 935                rc = hisi_sas_debug_issue_ssp_tmf(device,
 936                                                  lun.scsi_lun,
 937                                                  &tmf_task);
 938                switch (rc) {
  939                /* The task is still in the LUN; it will be released */
  940                case TMF_RESP_FUNC_SUCC:
  941                /* The task is not in the LUN or the query failed; reset the phy */
 942                case TMF_RESP_FUNC_FAILED:
 943                case TMF_RESP_FUNC_COMPLETE:
 944                        break;
 945                }
 946        }
 947        return rc;
 948}
 949
 950static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
 951{
 952        hisi_sas_port_notify_formed(sas_phy);
 953}
 954
 955static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
 956{
 957        hisi_sas_port_notify_deformed(sas_phy);
 958}
 959
 960static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
 961{
 962        phy->phy_attached = 0;
 963        phy->phy_type = 0;
 964        phy->port = NULL;
 965}
 966
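/*
 * Handle a phy-down notification. If @rdy the phy came straight back
 * up, so just re-report it; otherwise notify libsas of the loss of
 * signal, detach the phy from its port and clear the port's attached
 * state when no other phys keep it alive.
 */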
 967void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
 968{
 969        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 970        struct asd_sas_phy *sas_phy = &phy->sas_phy;
 971        struct sas_ha_struct *sas_ha = &hisi_hba->sha;
 972
 973        if (rdy) {
 974                /* Phy down but ready */
 975                hisi_sas_bytes_dmaed(hisi_hba, phy_no);
 976                hisi_sas_port_notify_formed(sas_phy);
 977        } else {
 978                struct hisi_sas_port *port  = phy->port;
 979
 980                /* Phy down and not ready */
 981                sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
 982                sas_phy_disconnected(sas_phy);
 983
 984                if (port) {
 985                        if (phy->phy_type & PORT_TYPE_SAS) {
 986                                int port_id = port->id;
 987
 988                                if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
 989                                                                       port_id))
 990                                        port->port_attached = 0;
 991                        } else if (phy->phy_type & PORT_TYPE_SATA)
 992                                port->port_attached = 0;
 993                }
 994                hisi_sas_phy_disconnected(phy);
 995        }
 996}
 997EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
 998
 999static struct scsi_transport_template *hisi_sas_stt;
1000
1001static struct scsi_host_template hisi_sas_sht = {
1002        .module                 = THIS_MODULE,
1003        .name                   = DRV_NAME,
1004        .queuecommand           = sas_queuecommand,
1005        .target_alloc           = sas_target_alloc,
1006        .slave_configure        = hisi_sas_slave_configure,
1007        .scan_finished          = hisi_sas_scan_finished,
1008        .scan_start             = hisi_sas_scan_start,
1009        .change_queue_depth     = sas_change_queue_depth,
1010        .bios_param             = sas_bios_param,
1011        .can_queue              = 1,
1012        .this_id                = -1,
1013        .sg_tablesize           = SG_ALL,
1014        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
1015        .use_clustering         = ENABLE_CLUSTERING,
1016        .eh_device_reset_handler = sas_eh_device_reset_handler,
1017        .eh_bus_reset_handler   = sas_eh_bus_reset_handler,
1018        .target_destroy         = sas_target_destroy,
1019        .ioctl                  = sas_ioctl,
1020};
1021
1022static struct sas_domain_function_template hisi_sas_transport_ops = {
1023        .lldd_dev_found         = hisi_sas_dev_found,
1024        .lldd_dev_gone          = hisi_sas_dev_gone,
1025        .lldd_execute_task      = hisi_sas_queue_command,
1026        .lldd_control_phy       = hisi_sas_control_phy,
1027        .lldd_abort_task        = hisi_sas_abort_task,
1028        .lldd_abort_task_set    = hisi_sas_abort_task_set,
1029        .lldd_clear_aca         = hisi_sas_clear_aca,
1030        .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
1031        .lldd_lu_reset          = hisi_sas_lu_reset,
1032        .lldd_query_task        = hisi_sas_query_task,
1033        .lldd_port_formed       = hisi_sas_port_formed,
1034        .lldd_port_deformed     = hisi_sas_port_deformed,
1035};
1036
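/*
 * Allocate all per-hba state: phy and port bookkeeping, the device
 * table, per-queue command and completion header rings, the DMA pools
 * used for status buffers, command tables and SGE pages, the ITCT,
 * IOST and breakpoint tables, the slot info array and its tag bitmap,
 * the initial FIS buffer and the driver workqueue. Everything is
 * either devm-managed or torn down again by hisi_sas_free().
 */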
1037static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1038{
1039        struct platform_device *pdev = hisi_hba->pdev;
1040        struct device *dev = &pdev->dev;
1041        int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1042
1043        spin_lock_init(&hisi_hba->lock);
1044        for (i = 0; i < hisi_hba->n_phy; i++) {
1045                hisi_sas_phy_init(hisi_hba, i);
1046                hisi_hba->port[i].port_attached = 0;
1047                hisi_hba->port[i].id = -1;
1048                INIT_LIST_HEAD(&hisi_hba->port[i].list);
1049        }
1050
1051        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1052                hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1053                hisi_hba->devices[i].device_id = i;
1054                hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1055        }
1056
1057        for (i = 0; i < hisi_hba->queue_count; i++) {
1058                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1059
1060                /* Completion queue structure */
1061                cq->id = i;
1062                cq->hisi_hba = hisi_hba;
1063
1064                /* Delivery queue */
1065                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1066                hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1067                                        &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1068                if (!hisi_hba->cmd_hdr[i])
1069                        goto err_out;
1070                memset(hisi_hba->cmd_hdr[i], 0, s);
1071
1072                /* Completion queue */
1073                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1074                hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1075                                &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1076                if (!hisi_hba->complete_hdr[i])
1077                        goto err_out;
1078                memset(hisi_hba->complete_hdr[i], 0, s);
1079        }
1080
1081        s = HISI_SAS_STATUS_BUF_SZ;
1082        hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
1083                                                       dev, s, 16, 0);
1084        if (!hisi_hba->status_buffer_pool)
1085                goto err_out;
1086
1087        s = HISI_SAS_COMMAND_TABLE_SZ;
1088        hisi_hba->command_table_pool = dma_pool_create("command_table",
1089                                                       dev, s, 16, 0);
1090        if (!hisi_hba->command_table_pool)
1091                goto err_out;
1092
1093        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1094        hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1095                                            GFP_KERNEL);
1096        if (!hisi_hba->itct)
1097                goto err_out;
1098
1099        memset(hisi_hba->itct, 0, s);
1100
1101        hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1102                                           sizeof(struct hisi_sas_slot),
1103                                           GFP_KERNEL);
1104        if (!hisi_hba->slot_info)
1105                goto err_out;
1106
1107        s = max_command_entries * sizeof(struct hisi_sas_iost);
1108        hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1109                                            GFP_KERNEL);
1110        if (!hisi_hba->iost)
1111                goto err_out;
1112
1113        memset(hisi_hba->iost, 0, s);
1114
1115        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1116        hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1117                                &hisi_hba->breakpoint_dma, GFP_KERNEL);
1118        if (!hisi_hba->breakpoint)
1119                goto err_out;
1120
1121        memset(hisi_hba->breakpoint, 0, s);
1122
1123        hisi_hba->slot_index_count = max_command_entries;
1124        s = hisi_hba->slot_index_count / sizeof(unsigned long);
1125        hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1126        if (!hisi_hba->slot_index_tags)
1127                goto err_out;
1128
1129        hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
1130                                sizeof(struct hisi_sas_sge_page), 16, 0);
1131        if (!hisi_hba->sge_page_pool)
1132                goto err_out;
1133
1134        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1135        hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1136                                &hisi_hba->initial_fis_dma, GFP_KERNEL);
1137        if (!hisi_hba->initial_fis)
1138                goto err_out;
1139        memset(hisi_hba->initial_fis, 0, s);
1140
1141        s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1142        hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1143                                &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1144        if (!hisi_hba->sata_breakpoint)
1145                goto err_out;
1146        memset(hisi_hba->sata_breakpoint, 0, s);
1147
1148        hisi_sas_slot_index_init(hisi_hba);
1149
1150        hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1151        if (!hisi_hba->wq) {
1152                dev_err(dev, "sas_alloc: failed to create workqueue\n");
1153                goto err_out;
1154        }
1155
1156        return 0;
1157err_out:
1158        return -ENOMEM;
1159}
1160
1161static void hisi_sas_free(struct hisi_hba *hisi_hba)
1162{
1163        struct device *dev = &hisi_hba->pdev->dev;
1164        int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1165
1166        for (i = 0; i < hisi_hba->queue_count; i++) {
1167                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1168                if (hisi_hba->cmd_hdr[i])
1169                        dma_free_coherent(dev, s,
1170                                          hisi_hba->cmd_hdr[i],
1171                                          hisi_hba->cmd_hdr_dma[i]);
1172
1173                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1174                if (hisi_hba->complete_hdr[i])
1175                        dma_free_coherent(dev, s,
1176                                          hisi_hba->complete_hdr[i],
1177                                          hisi_hba->complete_hdr_dma[i]);
1178        }
1179
1180        dma_pool_destroy(hisi_hba->status_buffer_pool);
1181        dma_pool_destroy(hisi_hba->command_table_pool);
1182        dma_pool_destroy(hisi_hba->sge_page_pool);
1183
1184        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1185        if (hisi_hba->itct)
1186                dma_free_coherent(dev, s,
1187                                  hisi_hba->itct, hisi_hba->itct_dma);
1188
1189        s = max_command_entries * sizeof(struct hisi_sas_iost);
1190        if (hisi_hba->iost)
1191                dma_free_coherent(dev, s,
1192                                  hisi_hba->iost, hisi_hba->iost_dma);
1193
1194        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1195        if (hisi_hba->breakpoint)
1196                dma_free_coherent(dev, s,
1197                                  hisi_hba->breakpoint,
1198                                  hisi_hba->breakpoint_dma);
1199
1200
1201        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1202        if (hisi_hba->initial_fis)
1203                dma_free_coherent(dev, s,
1204                                  hisi_hba->initial_fis,
1205                                  hisi_hba->initial_fis_dma);
1206
1207        s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1208        if (hisi_hba->sata_breakpoint)
1209                dma_free_coherent(dev, s,
1210                                  hisi_hba->sata_breakpoint,
1211                                  hisi_hba->sata_breakpoint_dma);
1212
1213        if (hisi_hba->wq)
1214                destroy_workqueue(hisi_hba->wq);
1215}
1216
1217static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1218                                              const struct hisi_sas_hw *hw)
1219{
1220        struct resource *res;
1221        struct Scsi_Host *shost;
1222        struct hisi_hba *hisi_hba;
1223        struct device *dev = &pdev->dev;
1224        struct device_node *np = pdev->dev.of_node;
1225
1226        shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
1227        if (!shost)
1228                goto err_out;
1229        hisi_hba = shost_priv(shost);
1230
1231        hisi_hba->hw = hw;
1232        hisi_hba->pdev = pdev;
1233        hisi_hba->shost = shost;
1234        SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
1235
1236        init_timer(&hisi_hba->timer);
1237
1238        if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
1239                                          SAS_ADDR_SIZE))
1240                goto err_out;
1241
1242        if (np) {
1243                hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
1244                                        "hisilicon,sas-syscon");
1245                if (IS_ERR(hisi_hba->ctrl))
1246                        goto err_out;
1247
1248                if (device_property_read_u32(dev, "ctrl-reset-reg",
1249                                             &hisi_hba->ctrl_reset_reg))
1250                        goto err_out;
1251
1252                if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
1253                                             &hisi_hba->ctrl_reset_sts_reg))
1254                        goto err_out;
1255
1256                if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
1257                                             &hisi_hba->ctrl_clock_ena_reg))
1258                        goto err_out;
1259        }
1260
1261        if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
1262                goto err_out;
1263
1264        if (device_property_read_u32(dev, "queue-count",
1265                                     &hisi_hba->queue_count))
1266                goto err_out;
1267
1268        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1269        hisi_hba->regs = devm_ioremap_resource(dev, res);
1270        if (IS_ERR(hisi_hba->regs))
1271                goto err_out;
1272
1273        if (hisi_sas_alloc(hisi_hba, shost)) {
1274                hisi_sas_free(hisi_hba);
1275                goto err_out;
1276        }
1277
1278        return shost;
1279err_out:
1280        dev_err(dev, "shost alloc failed\n");
1281        return NULL;
1282}
1283
1284static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
1285{
1286        int i;
1287
1288        for (i = 0; i < hisi_hba->n_phy; i++)
1289                memcpy(&hisi_hba->phy[i].dev_sas_addr,
1290                       hisi_hba->sas_addr,
1291                       SAS_ADDR_SIZE);
1292}
1293
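/*
 * Generic probe path shared by the hw-specific front ends: allocate
 * the Scsi_Host and hba structures, set the DMA mask, wire up the
 * libsas ha/phy/port arrays, run the hw init hook, then register the
 * host with the SCSI midlayer and libsas and kick off the initial
 * scan.
 */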
1294int hisi_sas_probe(struct platform_device *pdev,
1295                         const struct hisi_sas_hw *hw)
1296{
1297        struct Scsi_Host *shost;
1298        struct hisi_hba *hisi_hba;
1299        struct device *dev = &pdev->dev;
1300        struct asd_sas_phy **arr_phy;
1301        struct asd_sas_port **arr_port;
1302        struct sas_ha_struct *sha;
1303        int rc, phy_nr, port_nr, i;
1304
1305        shost = hisi_sas_shost_alloc(pdev, hw);
1306        if (!shost) {
1307                rc = -ENOMEM;
1308                goto err_out_ha;
1309        }
1310
1311        sha = SHOST_TO_SAS_HA(shost);
1312        hisi_hba = shost_priv(shost);
1313        platform_set_drvdata(pdev, sha);
1314
1315        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
1316            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
1317                dev_err(dev, "No usable DMA addressing method\n");
1318                rc = -EIO;
1319                goto err_out_ha;
1320        }
1321
1322        phy_nr = port_nr = hisi_hba->n_phy;
1323
1324        arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1325        arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
1326        if (!arr_phy || !arr_port)
1327                return -ENOMEM;
1328
1329        sha->sas_phy = arr_phy;
1330        sha->sas_port = arr_port;
1331        sha->core.shost = shost;
1332        sha->lldd_ha = hisi_hba;
1333
1334        shost->transportt = hisi_sas_stt;
1335        shost->max_id = HISI_SAS_MAX_DEVICES;
1336        shost->max_lun = ~0;
1337        shost->max_channel = 1;
1338        shost->max_cmd_len = 16;
1339        shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
1340        shost->can_queue = hisi_hba->hw->max_command_entries;
1341        shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
1342
1343        sha->sas_ha_name = DRV_NAME;
1344        sha->dev = &hisi_hba->pdev->dev;
1345        sha->lldd_module = THIS_MODULE;
1346        sha->sas_addr = &hisi_hba->sas_addr[0];
1347        sha->num_phys = hisi_hba->n_phy;
1348        sha->core.shost = hisi_hba->shost;
1349
1350        for (i = 0; i < hisi_hba->n_phy; i++) {
1351                sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
1352                sha->sas_port[i] = &hisi_hba->port[i].sas_port;
1353        }
1354
1355        hisi_sas_init_add(hisi_hba);
1356
1357        rc = hisi_hba->hw->hw_init(hisi_hba);
1358        if (rc)
1359                goto err_out_ha;
1360
1361        rc = scsi_add_host(shost, &pdev->dev);
1362        if (rc)
1363                goto err_out_ha;
1364
1365        rc = sas_register_ha(sha);
1366        if (rc)
1367                goto err_out_register_ha;
1368
1369        scsi_scan_host(shost);
1370
1371        return 0;
1372
1373err_out_register_ha:
1374        scsi_remove_host(shost);
1375err_out_ha:
1376        kfree(shost);
1377        return rc;
1378}
1379EXPORT_SYMBOL_GPL(hisi_sas_probe);
1380
1381int hisi_sas_remove(struct platform_device *pdev)
1382{
1383        struct sas_ha_struct *sha = platform_get_drvdata(pdev);
1384        struct hisi_hba *hisi_hba = sha->lldd_ha;
1385
1386        scsi_remove_host(sha->core.shost);
1387        sas_unregister_ha(sha);
1388        sas_remove_host(sha->core.shost);
1389
1390        hisi_sas_free(hisi_hba);
1391        return 0;
1392}
1393EXPORT_SYMBOL_GPL(hisi_sas_remove);
1394
1395static __init int hisi_sas_init(void)
1396{
1397        pr_info("hisi_sas: driver version %s\n", DRV_VERSION);
1398
1399        hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
1400        if (!hisi_sas_stt)
1401                return -ENOMEM;
1402
1403        return 0;
1404}
1405
1406static __exit void hisi_sas_exit(void)
1407{
1408        sas_release_transport(hisi_sas_stt);
1409}
1410
1411module_init(hisi_sas_init);
1412module_exit(hisi_sas_exit);
1413
1414MODULE_VERSION(DRV_VERSION);
1415MODULE_LICENSE("GPL");
1416MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
1417MODULE_DESCRIPTION("HISILICON SAS controller driver");
1418MODULE_ALIAS("platform:" DRV_NAME);
1419