linux/drivers/scsi/isci/task.c
   1/*
   2 * This file is provided under a dual BSD/GPLv2 license.  When using or
   3 * redistributing this file, you may do so under either license.
   4 *
   5 * GPL LICENSE SUMMARY
   6 *
   7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21 * The full GNU General Public License is included in this distribution
  22 * in the file called LICENSE.GPL.
  23 *
  24 * BSD LICENSE
  25 *
  26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  27 * All rights reserved.
  28 *
  29 * Redistribution and use in source and binary forms, with or without
  30 * modification, are permitted provided that the following conditions
  31 * are met:
  32 *
  33 *   * Redistributions of source code must retain the above copyright
  34 *     notice, this list of conditions and the following disclaimer.
  35 *   * Redistributions in binary form must reproduce the above copyright
  36 *     notice, this list of conditions and the following disclaimer in
  37 *     the documentation and/or other materials provided with the
  38 *     distribution.
  39 *   * Neither the name of Intel Corporation nor the names of its
  40 *     contributors may be used to endorse or promote products derived
  41 *     from this software without specific prior written permission.
  42 *
  43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  54 */
  55
  56#include <linux/completion.h>
  57#include <linux/irqflags.h>
  58#include "sas.h"
  59#include <scsi/libsas.h>
  60#include "remote_device.h"
  61#include "remote_node_context.h"
  62#include "isci.h"
  63#include "request.h"
  64#include "task.h"
  65#include "host.h"
  66
  67/**
  68* isci_task_refuse() - complete the request to the upper layer driver in
  69*     the case where an I/O needs to be completed back in the submit path.
   70* @ihost: host on which the request was queued
  71* @task: request to complete
  72* @response: response code for the completed task.
  73* @status: status code for the completed task.
  74*
  75*/
  76static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
  77                             enum service_response response,
  78                             enum exec_status status)
  79
  80{
  81        enum isci_completion_selection disposition;
  82
  83        disposition = isci_perform_normal_io_completion;
  84        disposition = isci_task_set_completion_status(task, response, status,
  85                                                      disposition);
  86
  87        /* Tasks aborted specifically by a call to the lldd_abort_task
  88         * function should not be completed to the host in the regular path.
  89         */
  90        switch (disposition) {
  91        case isci_perform_normal_io_completion:
  92                /* Normal notification (task_done) */
  93                dev_dbg(&ihost->pdev->dev,
  94                        "%s: Normal - task = %p, response=%d, "
  95                        "status=%d\n",
  96                        __func__, task, response, status);
  97
  98                task->lldd_task = NULL;
  99
 100                isci_execpath_callback(ihost, task, task->task_done);
 101                break;
 102
 103        case isci_perform_aborted_io_completion:
 104                /*
 105                 * No notification because this request is already in the
 106                 * abort path.
 107                 */
 108                dev_dbg(&ihost->pdev->dev,
 109                        "%s: Aborted - task = %p, response=%d, "
 110                        "status=%d\n",
 111                        __func__, task, response, status);
 112                break;
 113
 114        case isci_perform_error_io_completion:
 115                /* Use sas_task_abort */
 116                dev_dbg(&ihost->pdev->dev,
 117                        "%s: Error - task = %p, response=%d, "
 118                        "status=%d\n",
 119                        __func__, task, response, status);
 120
 121                isci_execpath_callback(ihost, task, sas_task_abort);
 122                break;
 123
 124        default:
 125                dev_dbg(&ihost->pdev->dev,
 126                        "%s: isci task notification default case!",
 127                        __func__);
 128                sas_task_abort(task);
 129                break;
 130        }
 131}
 132
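/* Iterate over the 'num' sas_tasks handed down by libsas, advancing 'task'
 * to the next entry on its list with each pass.
 */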
 133#define for_each_sas_task(num, task) \
 134        for (; num > 0; num--,\
 135             task = list_entry(task->list.next, struct sas_task, list))
 136
 137
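/* A device can accept I/O if it exists and either has IDEV_IO_READY set, or
 * is in NCQ error recovery and this task is the NCQ recovery command.
 */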
 138static inline int isci_device_io_ready(struct isci_remote_device *idev,
 139                                       struct sas_task *task)
 140{
 141        return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
 142                      (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
 143                       isci_task_is_ncq_recovery(task))
 144                    : 0;
 145}
 146/**
 147 * isci_task_execute_task() - This function is one of the SAS Domain Template
 148 *    functions. This function is called by libsas to send a task down to
 149 *    hardware.
 150 * @task: This parameter specifies the SAS task to send.
 151 * @num: This parameter specifies the number of tasks to queue.
 152 * @gfp_flags: This parameter specifies the context of this call.
 153 *
 154 * status, zero indicates success.
 155 */
 156int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 157{
 158        struct isci_host *ihost = dev_to_ihost(task->dev);
 159        struct isci_remote_device *idev;
 160        unsigned long flags;
 161        bool io_ready;
 162        u16 tag;
 163
 164        dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
 165
 166        for_each_sas_task(num, task) {
 167                enum sci_status status = SCI_FAILURE;
 168
 169                spin_lock_irqsave(&ihost->scic_lock, flags);
 170                idev = isci_lookup_device(task->dev);
 171                io_ready = isci_device_io_ready(idev, task);
 172                tag = isci_alloc_tag(ihost);
 173                spin_unlock_irqrestore(&ihost->scic_lock, flags);
 174
 175                dev_dbg(&ihost->pdev->dev,
 176                        "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
 177                        task, num, task->dev, idev, idev ? idev->flags : 0,
 178                        task->uldd_task);
 179
 180                if (!idev) {
 181                        isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
 182                                         SAS_DEVICE_UNKNOWN);
 183                } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
 184                        /* Indicate QUEUE_FULL so that the scsi midlayer
 185                         * retries.
  186                         */
 187                        isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
 188                                         SAS_QUEUE_FULL);
 189                } else {
 190                        /* There is a device and it's ready for I/O. */
 191                        spin_lock_irqsave(&task->task_state_lock, flags);
 192
 193                        if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
 194                                /* The I/O was aborted. */
 195                                spin_unlock_irqrestore(&task->task_state_lock,
 196                                                       flags);
 197
 198                                isci_task_refuse(ihost, task,
 199                                                 SAS_TASK_UNDELIVERED,
 200                                                 SAM_STAT_TASK_ABORTED);
 201                        } else {
 202                                task->task_state_flags |= SAS_TASK_AT_INITIATOR;
 203                                spin_unlock_irqrestore(&task->task_state_lock, flags);
 204
 205                                /* build and send the request. */
 206                                status = isci_request_execute(ihost, idev, task, tag);
 207
 208                                if (status != SCI_SUCCESS) {
 209
 210                                        spin_lock_irqsave(&task->task_state_lock, flags);
 211                                        /* Did not really start this command. */
 212                                        task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 213                                        spin_unlock_irqrestore(&task->task_state_lock, flags);
 214
 215                                        if (test_bit(IDEV_GONE, &idev->flags)) {
 216
 217                                                /* Indicate that the device
 218                                                 * is gone.
 219                                                 */
 220                                                isci_task_refuse(ihost, task,
 221                                                        SAS_TASK_UNDELIVERED,
 222                                                        SAS_DEVICE_UNKNOWN);
 223                                        } else {
 224                                                /* Indicate QUEUE_FULL so that
 225                                                 * the scsi midlayer retries.
 226                                                 * If the request failed for
 227                                                 * remote device reasons, it
 228                                                 * gets returned as
 229                                                 * SAS_TASK_UNDELIVERED next
 230                                                 * time through.
 231                                                 */
 232                                                isci_task_refuse(ihost, task,
 233                                                        SAS_TASK_COMPLETE,
 234                                                        SAS_QUEUE_FULL);
 235                                        }
 236                                }
 237                        }
 238                }
 239                if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
 240                        spin_lock_irqsave(&ihost->scic_lock, flags);
 241                        /* command never hit the device, so just free
 242                         * the tci and skip the sequence increment
 243                         */
 244                        isci_tci_free(ihost, ISCI_TAG_TCI(tag));
 245                        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 246                }
 247                isci_put_device(idev);
 248        }
 249        return 0;
 250}
 251
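/* Build the host-to-device FIS for a SATA management TMF (currently only the
 * SRST assert/deassert functions) and let the core construct the request.
 */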
 252static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
 253{
 254        struct isci_tmf *isci_tmf;
 255        enum sci_status status;
 256
 257        if (!test_bit(IREQ_TMF, &ireq->flags))
 258                return SCI_FAILURE;
 259
 260        isci_tmf = isci_request_access_tmf(ireq);
 261
 262        switch (isci_tmf->tmf_code) {
 263
 264        case isci_tmf_sata_srst_high:
 265        case isci_tmf_sata_srst_low: {
 266                struct host_to_dev_fis *fis = &ireq->stp.cmd;
 267
 268                memset(fis, 0, sizeof(*fis));
 269
 270                fis->fis_type  =  0x27;
 271                fis->flags     &= ~0x80;
 272                fis->flags     &= 0xF0;
 273                if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
 274                        fis->control |= ATA_SRST;
 275                else
 276                        fis->control &= ~ATA_SRST;
 277                break;
 278        }
  279        /* other management commands go here... */
 280        default:
 281                return SCI_FAILURE;
 282        }
 283
 284        /* core builds the protocol specific request
 285         *  based on the h2d fis.
 286         */
 287        status = sci_task_request_construct_sata(ireq);
 288
 289        return status;
 290}
 291
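/* Allocate a TMF request for the given device and run the common, core, and
 * protocol-specific (SSP or SATA/STP) construction steps.  Returns NULL on
 * any failure.
 */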
 292static struct isci_request *isci_task_request_build(struct isci_host *ihost,
 293                                                    struct isci_remote_device *idev,
 294                                                    u16 tag, struct isci_tmf *isci_tmf)
 295{
 296        enum sci_status status = SCI_FAILURE;
 297        struct isci_request *ireq = NULL;
 298        struct domain_device *dev;
 299
 300        dev_dbg(&ihost->pdev->dev,
 301                "%s: isci_tmf = %p\n", __func__, isci_tmf);
 302
 303        dev = idev->domain_dev;
 304
 305        /* do common allocation and init of request object. */
 306        ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
 307        if (!ireq)
 308                return NULL;
 309
  310        /* let the core do its construct. */
 311        status = sci_task_request_construct(ihost, idev, tag,
 312                                             ireq);
 313
 314        if (status != SCI_SUCCESS) {
 315                dev_warn(&ihost->pdev->dev,
 316                         "%s: sci_task_request_construct failed - "
 317                         "status = 0x%x\n",
 318                         __func__,
 319                         status);
 320                return NULL;
 321        }
 322
 323        /* XXX convert to get this from task->tproto like other drivers */
 324        if (dev->dev_type == SAS_END_DEV) {
 325                isci_tmf->proto = SAS_PROTOCOL_SSP;
 326                status = sci_task_request_construct_ssp(ireq);
 327                if (status != SCI_SUCCESS)
 328                        return NULL;
 329        }
 330
 331        if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
 332                isci_tmf->proto = SAS_PROTOCOL_SATA;
 333                status = isci_sata_management_task_request_build(ireq);
 334
 335                if (status != SCI_SUCCESS)
 336                        return NULL;
 337        }
 338        return ireq;
 339}
 340
 341/**
 342* isci_request_mark_zombie() - This function must be called with scic_lock held.
 343*/
 344static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
 345{
 346        struct completion *tmf_completion = NULL;
 347        struct completion *req_completion;
 348
 349        /* Set the request state to "dead". */
 350        ireq->status = dead;
 351
 352        req_completion = ireq->io_request_completion;
 353        ireq->io_request_completion = NULL;
 354
 355        if (test_bit(IREQ_TMF, &ireq->flags)) {
 356                /* Break links with the TMF request. */
 357                struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 358
 359                /* In the case where a task request is dying,
 360                 * the thread waiting on the complete will sit and
 361                 * timeout unless we wake it now.  Since the TMF
 362                 * has a default error status, complete it here
 363                 * to wake the waiting thread.
 364                 */
 365                if (tmf) {
 366                        tmf_completion = tmf->complete;
 367                        tmf->complete = NULL;
 368                }
 369                ireq->ttype_ptr.tmf_task_ptr = NULL;
 370                dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
 371                        __func__, tmf->tmf_code, tmf->io_tag);
 372        } else {
 373                /* Break links with the sas_task - the callback is done
 374                 * elsewhere.
 375                 */
 376                struct sas_task *task = isci_request_access_task(ireq);
 377
 378                if (task)
 379                        task->lldd_task = NULL;
 380
 381                ireq->ttype_ptr.io_task_ptr = NULL;
 382        }
 383
 384        dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
 385                 ireq->io_tag);
 386
 387        /* Don't force waiting threads to timeout. */
 388        if (req_completion)
 389                complete(req_completion);
 390
 391        if (tmf_completion != NULL)
 392                complete(tmf_completion);
 393}
 394
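/* Build and start a TMF request, then wait up to timeout_ms for it to
 * complete.  On timeout the request is terminated; if the termination also
 * times out, the request is marked as a zombie.  Returns
 * TMF_RESP_FUNC_COMPLETE or TMF_RESP_FUNC_FAILED.
 */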
 395static int isci_task_execute_tmf(struct isci_host *ihost,
 396                                 struct isci_remote_device *idev,
 397                                 struct isci_tmf *tmf, unsigned long timeout_ms)
 398{
 399        DECLARE_COMPLETION_ONSTACK(completion);
 400        enum sci_task_status status = SCI_TASK_FAILURE;
 401        struct isci_request *ireq;
 402        int ret = TMF_RESP_FUNC_FAILED;
 403        unsigned long flags;
 404        unsigned long timeleft;
 405        u16 tag;
 406
 407        spin_lock_irqsave(&ihost->scic_lock, flags);
 408        tag = isci_alloc_tag(ihost);
 409        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 410
 411        if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 412                return ret;
 413
  414        /* sanity check, return TMF_RESP_FUNC_FAILED
  415         * if the device is not there or not ready.
 416         */
 417        if (!idev ||
 418            (!test_bit(IDEV_IO_READY, &idev->flags) &&
 419             !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
 420                dev_dbg(&ihost->pdev->dev,
 421                        "%s: idev = %p not ready (%#lx)\n",
 422                        __func__,
 423                        idev, idev ? idev->flags : 0);
 424                goto err_tci;
 425        } else
 426                dev_dbg(&ihost->pdev->dev,
 427                        "%s: idev = %p\n",
 428                        __func__, idev);
 429
 430        /* Assign the pointer to the TMF's completion kernel wait structure. */
 431        tmf->complete = &completion;
 432        tmf->status = SCI_FAILURE_TIMEOUT;
 433
 434        ireq = isci_task_request_build(ihost, idev, tag, tmf);
 435        if (!ireq)
 436                goto err_tci;
 437
 438        spin_lock_irqsave(&ihost->scic_lock, flags);
 439
 440        /* start the TMF io. */
 441        status = sci_controller_start_task(ihost, idev, ireq);
 442
 443        if (status != SCI_TASK_SUCCESS) {
 444                dev_dbg(&ihost->pdev->dev,
 445                         "%s: start_io failed - status = 0x%x, request = %p\n",
 446                         __func__,
 447                         status,
 448                         ireq);
 449                spin_unlock_irqrestore(&ihost->scic_lock, flags);
 450                goto err_tci;
 451        }
 452
 453        if (tmf->cb_state_func != NULL)
 454                tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
 455
 456        isci_request_change_state(ireq, started);
 457
 458        /* add the request to the remote device request list. */
 459        list_add(&ireq->dev_node, &idev->reqs_in_process);
 460
 461        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 462
 463        /* Wait for the TMF to complete, or a timeout. */
 464        timeleft = wait_for_completion_timeout(&completion,
 465                                               msecs_to_jiffies(timeout_ms));
 466
 467        if (timeleft == 0) {
 468                /* The TMF did not complete - this could be because
 469                 * of an unplug.  Terminate the TMF request now.
 470                 */
 471                spin_lock_irqsave(&ihost->scic_lock, flags);
 472
 473                if (tmf->cb_state_func != NULL)
 474                        tmf->cb_state_func(isci_tmf_timed_out, tmf,
 475                                           tmf->cb_data);
 476
 477                sci_controller_terminate_request(ihost, idev, ireq);
 478
 479                spin_unlock_irqrestore(&ihost->scic_lock, flags);
 480
 481                timeleft = wait_for_completion_timeout(
 482                        &completion,
 483                        msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
 484
 485                if (!timeleft) {
 486                        /* Strange condition - the termination of the TMF
 487                         * request timed-out.
 488                         */
 489                        spin_lock_irqsave(&ihost->scic_lock, flags);
 490
 491                        /* If the TMF status has not changed, kill it. */
 492                        if (tmf->status == SCI_FAILURE_TIMEOUT)
 493                                isci_request_mark_zombie(ihost, ireq);
 494
 495                        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 496                }
 497        }
 498
 499        isci_print_tmf(tmf);
 500
 501        if (tmf->status == SCI_SUCCESS)
 502                ret =  TMF_RESP_FUNC_COMPLETE;
 503        else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
 504                dev_dbg(&ihost->pdev->dev,
 505                        "%s: tmf.status == "
 506                        "SCI_FAILURE_IO_RESPONSE_VALID\n",
 507                        __func__);
 508                ret =  TMF_RESP_FUNC_COMPLETE;
 509        }
 510        /* Else - leave the default "failed" status alone. */
 511
 512        dev_dbg(&ihost->pdev->dev,
 513                "%s: completed request = %p\n",
 514                __func__,
 515                ireq);
 516
 517        return ret;
 518
 519 err_tci:
 520        spin_lock_irqsave(&ihost->scic_lock, flags);
 521        isci_tci_free(ihost, ISCI_TAG_TCI(tag));
 522        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 523
 524        return ret;
 525}
 526
 527static void isci_task_build_tmf(struct isci_tmf *tmf,
 528                                enum isci_tmf_function_codes code,
 529                                void (*tmf_sent_cb)(enum isci_tmf_cb_state,
 530                                                    struct isci_tmf *,
 531                                                    void *),
 532                                void *cb_data)
 533{
 534        memset(tmf, 0, sizeof(*tmf));
 535
 536        tmf->tmf_code      = code;
 537        tmf->cb_state_func = tmf_sent_cb;
 538        tmf->cb_data       = cb_data;
 539}
 540
 541static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
 542                                           enum isci_tmf_function_codes code,
 543                                           void (*tmf_sent_cb)(enum isci_tmf_cb_state,
 544                                                               struct isci_tmf *,
 545                                                               void *),
 546                                           struct isci_request *old_request)
 547{
 548        isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
 549        tmf->io_tag = old_request->io_tag;
 550}
 551
 552/**
 553 * isci_task_validate_request_to_abort() - This function checks the given I/O
  554 *    against the "started" state.  If the request is still "started", its
 555 *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
 556 *    BEFORE CALLING THIS FUNCTION.
 557 * @isci_request: This parameter specifies the request object to control.
 558 * @isci_host: This parameter specifies the ISCI host object
 559 * @isci_device: This is the device to which the request is pending.
 560 * @aborted_io_completion: This is a completion structure that will be added to
 561 *    the request in case it is changed to aborting; this completion is
 562 *    triggered when the request is fully completed.
 563 *
 564 * Either "started" on successful change of the task status to "aborted", or
 565 * "unallocated" if the task cannot be controlled.
 566 */
 567static enum isci_request_status isci_task_validate_request_to_abort(
 568        struct isci_request *isci_request,
 569        struct isci_host *isci_host,
 570        struct isci_remote_device *isci_device,
 571        struct completion *aborted_io_completion)
 572{
 573        enum isci_request_status old_state = unallocated;
 574
 575        /* Only abort the task if it's in the
  576         *  device's reqs_in_process list
 577         */
 578        if (isci_request && !list_empty(&isci_request->dev_node)) {
 579                old_state = isci_request_change_started_to_aborted(
 580                        isci_request, aborted_io_completion);
 581
 582        }
 583
 584        return old_state;
 585}
 586
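/* Requests in these states are deallocated by the terminate/abort path
 * rather than by the normal completion path.
 */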
 587static int isci_request_is_dealloc_managed(enum isci_request_status stat)
 588{
 589        switch (stat) {
 590        case aborted:
 591        case aborting:
 592        case terminating:
 593        case completed:
 594        case dead:
 595                return true;
 596        default:
 597                return false;
 598        }
 599}
 600
 601/**
 602 * isci_terminate_request_core() - This function will terminate the given
 603 *    request, and wait for it to complete.  This function must only be called
 604 *    from a thread that can wait.  Note that the request is terminated and
 605 *    completed (back to the host, if started there).
 606 * @ihost: This SCU.
 607 * @idev: The target.
 608 * @isci_request: The I/O request to be terminated.
 609 *
 610 */
 611static void isci_terminate_request_core(struct isci_host *ihost,
 612                                        struct isci_remote_device *idev,
 613                                        struct isci_request *isci_request)
 614{
 615        enum sci_status status      = SCI_SUCCESS;
 616        bool was_terminated         = false;
 617        bool needs_cleanup_handling = false;
 618        unsigned long     flags;
 619        unsigned long     termination_completed = 1;
 620        struct completion *io_request_completion;
 621
 622        dev_dbg(&ihost->pdev->dev,
 623                "%s: device = %p; request = %p\n",
 624                __func__, idev, isci_request);
 625
 626        spin_lock_irqsave(&ihost->scic_lock, flags);
 627
 628        io_request_completion = isci_request->io_request_completion;
 629
 630        /* Note that we are not going to control
 631         * the target to abort the request.
 632         */
 633        set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
 634
 635        /* Make sure the request wasn't just sitting around signalling
 636         * device condition (if the request handle is NULL, then the
 637         * request completed but needed additional handling here).
 638         */
 639        if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
 640                was_terminated = true;
 641                needs_cleanup_handling = true;
 642                status = sci_controller_terminate_request(ihost,
 643                                                           idev,
 644                                                           isci_request);
 645        }
 646        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 647
 648        /*
 649         * The only time the request to terminate will
 650         * fail is when the io request is completed and
 651         * being aborted.
 652         */
 653        if (status != SCI_SUCCESS) {
 654                dev_dbg(&ihost->pdev->dev,
 655                        "%s: sci_controller_terminate_request"
 656                        " returned = 0x%x\n",
 657                        __func__, status);
 658
 659                isci_request->io_request_completion = NULL;
 660
 661        } else {
 662                if (was_terminated) {
 663                        dev_dbg(&ihost->pdev->dev,
 664                                "%s: before completion wait (%p/%p)\n",
 665                                __func__, isci_request, io_request_completion);
 666
 667                        /* Wait here for the request to complete. */
 668                        termination_completed
 669                                = wait_for_completion_timeout(
 670                                   io_request_completion,
 671                                   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
 672
 673                        if (!termination_completed) {
 674
 675                                /* The request to terminate has timed out.  */
 676                                spin_lock_irqsave(&ihost->scic_lock, flags);
 677
 678                                /* Check for state changes. */
 679                                if (!test_bit(IREQ_TERMINATED,
 680                                              &isci_request->flags)) {
 681
 682                                        /* The best we can do is to have the
 683                                         * request die a silent death if it
 684                                         * ever really completes.
 685                                         */
 686                                        isci_request_mark_zombie(ihost,
 687                                                                 isci_request);
 688                                        needs_cleanup_handling = true;
 689                                } else
 690                                        termination_completed = 1;
 691
 692                                spin_unlock_irqrestore(&ihost->scic_lock,
 693                                                       flags);
 694
 695                                if (!termination_completed) {
 696
 697                                        dev_dbg(&ihost->pdev->dev,
 698                                                "%s: *** Timeout waiting for "
 699                                                "termination(%p/%p)\n",
 700                                                __func__, io_request_completion,
 701                                                isci_request);
 702
 703                                        /* The request can no longer be referenced
 704                                         * safely since it may go away if the
  705                                         * termination ever really does complete.
 706                                         */
 707                                        isci_request = NULL;
 708                                }
 709                        }
 710                        if (termination_completed)
 711                                dev_dbg(&ihost->pdev->dev,
 712                                        "%s: after completion wait (%p/%p)\n",
 713                                        __func__, isci_request, io_request_completion);
 714                }
 715
 716                if (termination_completed) {
 717
 718                        isci_request->io_request_completion = NULL;
 719
 720                        /* Peek at the status of the request.  This will tell
 721                         * us if there was special handling on the request such that it
 722                         * needs to be detached and freed here.
 723                         */
 724                        spin_lock_irqsave(&isci_request->state_lock, flags);
 725
 726                        needs_cleanup_handling
 727                                = isci_request_is_dealloc_managed(
 728                                        isci_request->status);
 729
 730                        spin_unlock_irqrestore(&isci_request->state_lock, flags);
 731
 732                }
 733                if (needs_cleanup_handling) {
 734
 735                        dev_dbg(&ihost->pdev->dev,
 736                                "%s: cleanup isci_device=%p, request=%p\n",
 737                                __func__, idev, isci_request);
 738
 739                        if (isci_request != NULL) {
 740                                spin_lock_irqsave(&ihost->scic_lock, flags);
 741                                isci_free_tag(ihost, isci_request->io_tag);
 742                                isci_request_change_state(isci_request, unallocated);
 743                                list_del_init(&isci_request->dev_node);
 744                                spin_unlock_irqrestore(&ihost->scic_lock, flags);
 745                        }
 746                }
 747        }
 748}
 749
 750/**
  751 * isci_terminate_pending_requests() - This function will change all of the
  752 *    requests on the given device to the "terminating" state, will terminate the
 753 *    requests, and wait for them to complete.  This function must only be
 754 *    called from a thread that can wait.  Note that the requests are all
 755 *    terminated and completed (back to the host, if started there).
  756 * @ihost: This parameter specifies the SCU.
 757 * @idev: This parameter specifies the target.
 758 *
 759 */
 760void isci_terminate_pending_requests(struct isci_host *ihost,
 761                                     struct isci_remote_device *idev)
 762{
 763        struct completion request_completion;
 764        enum isci_request_status old_state;
 765        unsigned long flags;
 766        LIST_HEAD(list);
 767
 768        spin_lock_irqsave(&ihost->scic_lock, flags);
 769        list_splice_init(&idev->reqs_in_process, &list);
 770
 771        /* assumes that isci_terminate_request_core deletes from the list */
 772        while (!list_empty(&list)) {
 773                struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
 774
 775                /* Change state to "terminating" if it is currently
 776                 * "started".
 777                 */
 778                old_state = isci_request_change_started_to_newstate(ireq,
 779                                                                    &request_completion,
 780                                                                    terminating);
 781                switch (old_state) {
 782                case started:
 783                case completed:
 784                case aborting:
 785                        break;
 786                default:
 787                        /* termination in progress, or otherwise dispositioned.
 788                         * We know the request was on 'list' so should be safe
 789                         * to move it back to reqs_in_process
 790                         */
 791                        list_move(&ireq->dev_node, &idev->reqs_in_process);
 792                        ireq = NULL;
 793                        break;
 794                }
 795
 796                if (!ireq)
 797                        continue;
 798                spin_unlock_irqrestore(&ihost->scic_lock, flags);
 799
 800                init_completion(&request_completion);
 801
 802                dev_dbg(&ihost->pdev->dev,
 803                         "%s: idev=%p request=%p; task=%p old_state=%d\n",
 804                         __func__, idev, ireq,
 805                        (!test_bit(IREQ_TMF, &ireq->flags)
 806                                ? isci_request_access_task(ireq)
 807                                : NULL),
 808                        old_state);
 809
 810                /* If the old_state is started:
 811                 * This request was not already being aborted. If it had been,
 812                 * then the aborting I/O (ie. the TMF request) would not be in
 813                 * the aborting state, and thus would be terminated here.  Note
 814                 * that since the TMF completion's call to the kernel function
 815                 * "complete()" does not happen until the pending I/O request
 816                 * terminate fully completes, we do not have to implement a
 817                 * special wait here for already aborting requests - the
 818                 * termination of the TMF request will force the request
  819                 * to finish its already-started terminate.
 820                 *
 821                 * If old_state == completed:
 822                 * This request completed from the SCU hardware perspective
 823                 * and now just needs cleaning up in terms of freeing the
 824                 * request and potentially calling up to libsas.
 825                 *
 826                 * If old_state == aborting:
 827                 * This request has already gone through a TMF timeout, but may
 828                 * not have been terminated; needs cleaning up at least.
 829                 */
 830                isci_terminate_request_core(ihost, idev, ireq);
 831                spin_lock_irqsave(&ihost->scic_lock, flags);
 832        }
 833        spin_unlock_irqrestore(&ihost->scic_lock, flags);
 834}
 835
 836/**
  837 * isci_task_send_lu_reset_sas() - This function is called by one of the SAS
  838 *    Domain Template functions to send an SSP LUN reset to the target.
 839 * @lun: This parameter specifies the lun to be reset.
 840 *
 841 * status, zero indicates success.
 842 */
 843static int isci_task_send_lu_reset_sas(
 844        struct isci_host *isci_host,
 845        struct isci_remote_device *isci_device,
 846        u8 *lun)
 847{
 848        struct isci_tmf tmf;
 849        int ret = TMF_RESP_FUNC_FAILED;
 850
 851        dev_dbg(&isci_host->pdev->dev,
 852                "%s: isci_host = %p, isci_device = %p\n",
 853                __func__, isci_host, isci_device);
 854        /* Send the LUN reset to the target.  By the time the call returns,
  855         * the TMF has either fully executed in the target (in which case the
  856         * return value is "TMF_RESP_FUNC_COMPLETE"), or the request has timed
  857         * out or was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
 858         */
 859        isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
 860
 861        #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
 862        ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
 863
 864        if (ret == TMF_RESP_FUNC_COMPLETE)
 865                dev_dbg(&isci_host->pdev->dev,
 866                        "%s: %p: TMF_LU_RESET passed\n",
 867                        __func__, isci_device);
 868        else
 869                dev_dbg(&isci_host->pdev->dev,
 870                        "%s: %p: TMF_LU_RESET failed (%x)\n",
 871                        __func__, isci_device, ret);
 872
 873        return ret;
 874}
 875
 876static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
 877                                 struct isci_remote_device *idev, u8 *lun)
 878{
 879        int ret = TMF_RESP_FUNC_FAILED;
 880        struct isci_tmf tmf;
 881
 882        /* Send the soft reset to the target */
 883        #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
 884        isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
 885
 886        ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
 887
 888        if (ret != TMF_RESP_FUNC_COMPLETE) {
 889                dev_dbg(&ihost->pdev->dev,
 890                         "%s: Assert SRST failed (%p) = %x",
 891                         __func__, idev, ret);
 892
 893                /* Return the failure so that the LUN reset is escalated
 894                 * to a target reset.
 895                 */
 896        }
 897        return ret;
 898}
 899
 900/**
 901 * isci_task_lu_reset() - This function is one of the SAS Domain Template
  902 *    functions. This is one of the Task Management functions called by libsas,
 903 *    to reset the given lun. Note the assumption that while this call is
 904 *    executing, no I/O will be sent by the host to the device.
 905 * @lun: This parameter specifies the lun to be reset.
 906 *
 907 * status, zero indicates success.
 908 */
 909int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
 910{
 911        struct isci_host *isci_host = dev_to_ihost(domain_device);
 912        struct isci_remote_device *isci_device;
 913        unsigned long flags;
 914        int ret;
 915
 916        spin_lock_irqsave(&isci_host->scic_lock, flags);
 917        isci_device = isci_lookup_device(domain_device);
 918        spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 919
 920        dev_dbg(&isci_host->pdev->dev,
 921                "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
 922                 __func__, domain_device, isci_host, isci_device);
 923
 924        if (!isci_device) {
 925                /* If the device is gone, stop the escalations. */
 926                dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
 927
 928                ret = TMF_RESP_FUNC_COMPLETE;
 929                goto out;
 930        }
 931        set_bit(IDEV_EH, &isci_device->flags);
 932
 933        /* Send the task management part of the reset. */
 934        if (sas_protocol_ata(domain_device->tproto)) {
 935                ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
 936        } else
 937                ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
 938
 939        /* If the LUN reset worked, all the I/O can now be terminated. */
 940        if (ret == TMF_RESP_FUNC_COMPLETE)
 941                /* Terminate all I/O now. */
 942                isci_terminate_pending_requests(isci_host,
 943                                                isci_device);
 944
 945 out:
 946        isci_put_device(isci_device);
 947        return ret;
 948}
 949
 950
 951/*       int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
 952int isci_task_clear_nexus_port(struct asd_sas_port *port)
 953{
 954        return TMF_RESP_FUNC_FAILED;
 955}
 956
 957
 958
 959int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
 960{
 961        return TMF_RESP_FUNC_FAILED;
 962}
 963
 964/* Task Management Functions. Must be called from process context.       */
 965
 966/**
 967 * isci_abort_task_process_cb() - This is a helper function for the abort task
 968 *    TMF command.  It manages the request state with respect to the successful
 969 *    transmission / completion of the abort task request.
  970 * @cb_state: This parameter specifies when this function was called - either
  971 *    after the TMF request has been started or after it has timed out.
 972 * @tmf: This parameter specifies the TMF in progress.
 973 *
 974 *
 975 */
 976static void isci_abort_task_process_cb(
 977        enum isci_tmf_cb_state cb_state,
 978        struct isci_tmf *tmf,
 979        void *cb_data)
 980{
 981        struct isci_request *old_request;
 982
 983        old_request = (struct isci_request *)cb_data;
 984
 985        dev_dbg(&old_request->isci_host->pdev->dev,
 986                "%s: tmf=%p, old_request=%p\n",
 987                __func__, tmf, old_request);
 988
 989        switch (cb_state) {
 990
 991        case isci_tmf_started:
 992                /* The TMF has been started.  Nothing to do here, since the
 993                 * request state was already set to "aborted" by the abort
 994                 * task function.
 995                 */
 996                if ((old_request->status != aborted)
 997                        && (old_request->status != completed))
 998                        dev_dbg(&old_request->isci_host->pdev->dev,
 999                                "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
1000                                __func__, old_request->status, tmf, old_request);
1001                break;
1002
1003        case isci_tmf_timed_out:
1004
1005                /* Set the task's state to "aborting", since the abort task
1006                 * function thread set it to "aborted" (above) in anticipation
1007                 * of the task management request working correctly.  Since the
1008                 * timeout has now fired, the TMF request failed.  We set the
1009                 * state such that the request completion will indicate the
1010                 * device is no longer present.
1011                 */
1012                isci_request_change_state(old_request, aborting);
1013                break;
1014
1015        default:
1016                dev_dbg(&old_request->isci_host->pdev->dev,
1017                        "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
1018                        __func__, cb_state, tmf, old_request);
1019                break;
1020        }
1021}
1022
1023/**
1024 * isci_task_abort_task() - This function is one of the SAS Domain Template
1025 *    functions. This function is called by libsas to abort a specified task.
1026 * @task: This parameter specifies the SAS task to abort.
1027 *
1028 * status, zero indicates success.
1029 */
1030int isci_task_abort_task(struct sas_task *task)
1031{
1032        struct isci_host *isci_host = dev_to_ihost(task->dev);
1033        DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
1034        struct isci_request       *old_request = NULL;
1035        enum isci_request_status  old_state;
1036        struct isci_remote_device *isci_device = NULL;
1037        struct isci_tmf           tmf;
1038        int                       ret = TMF_RESP_FUNC_FAILED;
1039        unsigned long             flags;
1040        int                       perform_termination = 0;
1041
1042        /* Get the isci_request reference from the task.  Note that
1043         * this check does not depend on the pending request list
1044         * in the device, because tasks driving resets may land here
1045         * after completion in the core.
1046         */
1047        spin_lock_irqsave(&isci_host->scic_lock, flags);
1048        spin_lock(&task->task_state_lock);
1049
1050        old_request = task->lldd_task;
1051
1052        /* If task is already done, the request isn't valid */
1053        if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
1054            (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
1055            old_request)
1056                isci_device = isci_lookup_device(task->dev);
1057
1058        spin_unlock(&task->task_state_lock);
1059        spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1060
1061        dev_dbg(&isci_host->pdev->dev,
1062                "%s: dev = %p, task = %p, old_request == %p\n",
1063                __func__, isci_device, task, old_request);
1064
1065        if (isci_device)
1066                set_bit(IDEV_EH, &isci_device->flags);
1067
1068        /* Device reset conditions signalled in task_state_flags are the
 1069         * responsibility of libsas to observe at the start of the error
1070         * handler thread.
1071         */
1072        if (!isci_device || !old_request) {
 1073                /* The request has already completed and there
 1074                 * is nothing to do here other than to set the task
 1075                 * done bit, and indicate that the task abort function
 1076                 * was successful.
 1077                 */
1078                spin_lock_irqsave(&task->task_state_lock, flags);
1079                task->task_state_flags |= SAS_TASK_STATE_DONE;
1080                task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1081                                            SAS_TASK_STATE_PENDING);
1082                spin_unlock_irqrestore(&task->task_state_lock, flags);
1083
1084                ret = TMF_RESP_FUNC_COMPLETE;
1085
1086                dev_dbg(&isci_host->pdev->dev,
1087                        "%s: abort task not needed for %p\n",
1088                        __func__, task);
1089                goto out;
1090        }
1091
1092        spin_lock_irqsave(&isci_host->scic_lock, flags);
1093
1094        /* Check the request status and change to "aborted" if currently
 1095         * "started"; if true then set the I/O kernel completion
1096         * struct that will be triggered when the request completes.
1097         */
1098        old_state = isci_task_validate_request_to_abort(
1099                                old_request, isci_host, isci_device,
1100                                &aborted_io_completion);
1101        if ((old_state != started) &&
1102            (old_state != completed) &&
1103            (old_state != aborting)) {
1104
1105                spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1106
 1107                /* The request was already being handled by someone else (because
 1108                 * they got to set the state away from started).
 1109                 */
1110                dev_dbg(&isci_host->pdev->dev,
1111                        "%s:  device = %p; old_request %p already being aborted\n",
1112                        __func__,
1113                        isci_device, old_request);
1114                ret = TMF_RESP_FUNC_COMPLETE;
1115                goto out;
1116        }
1117        if (task->task_proto == SAS_PROTOCOL_SMP ||
1118            sas_protocol_ata(task->task_proto) ||
1119            test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1120
1121                spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1122
1123                dev_dbg(&isci_host->pdev->dev,
1124                        "%s: %s request"
1125                        " or complete_in_target (%d), thus no TMF\n",
1126                        __func__,
1127                        ((task->task_proto == SAS_PROTOCOL_SMP)
1128                                ? "SMP"
1129                                : (sas_protocol_ata(task->task_proto)
1130                                        ? "SATA/STP"
1131                                        : "<other>")
1132                         ),
1133                        test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
1134
1135                if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
1136                        spin_lock_irqsave(&task->task_state_lock, flags);
1137                        task->task_state_flags |= SAS_TASK_STATE_DONE;
1138                        task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1139                                                    SAS_TASK_STATE_PENDING);
1140                        spin_unlock_irqrestore(&task->task_state_lock, flags);
1141                        ret = TMF_RESP_FUNC_COMPLETE;
1142                } else {
1143                        spin_lock_irqsave(&task->task_state_lock, flags);
1144                        task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
1145                                                    SAS_TASK_STATE_PENDING);
1146                        spin_unlock_irqrestore(&task->task_state_lock, flags);
1147                }
1148
1149                /* STP and SMP devices are not sent a TMF, but the
1150                 * outstanding I/O request is terminated below.  This is
1151                 * because SATA/STP and SMP discovery path timeouts directly
1152                 * call the abort task interface for cleanup.
1153                 */
1154                perform_termination = 1;
1155
1156        } else {
 1157                /* Fill in the tmf structure */
1158                isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
1159                                               isci_abort_task_process_cb,
1160                                               old_request);
1161
1162                spin_unlock_irqrestore(&isci_host->scic_lock, flags);
1163
1164                #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
1165                ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
1166                                            ISCI_ABORT_TASK_TIMEOUT_MS);
1167
1168                if (ret == TMF_RESP_FUNC_COMPLETE)
1169                        perform_termination = 1;
1170                else
1171                        dev_dbg(&isci_host->pdev->dev,
1172                                "%s: isci_task_send_tmf failed\n", __func__);
1173        }
1174        if (perform_termination) {
1175                set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
1176
1177                /* Clean up the request on our side, and wait for the aborted
1178                 * I/O to complete.
1179                 */
1180                isci_terminate_request_core(isci_host, isci_device,
1181                                            old_request);
1182        }
1183
1184        /* Make sure we do not leave a reference to aborted_io_completion */
1185        old_request->io_request_completion = NULL;
1186 out:
1187        isci_put_device(isci_device);
1188        return ret;
1189}
1190
1191/**
1192 * isci_task_abort_task_set() - This function is one of the SAS Domain Template
 1193 *    functions. This is one of the Task Management functions called by libsas,
 1194 *    to abort all tasks for the given lun.
1195 * @d_device: This parameter specifies the domain device associated with this
1196 *    request.
1197 * @lun: This parameter specifies the lun associated with this request.
1198 *
1199 * status, zero indicates success.
1200 */
1201int isci_task_abort_task_set(
1202        struct domain_device *d_device,
1203        u8 *lun)
1204{
1205        return TMF_RESP_FUNC_FAILED;
1206}
1207
1208
1209/**
1210 * isci_task_clear_aca() - This function is one of the SAS Domain Template
 1211 *    functions. This is one of the Task Management functions called by libsas.
1212 * @d_device: This parameter specifies the domain device associated with this
1213 *    request.
 1214 * @lun: This parameter specifies the lun associated with this request.
1215 *
1216 * status, zero indicates success.
1217 */
1218int isci_task_clear_aca(
1219        struct domain_device *d_device,
1220        u8 *lun)
1221{
1222        return TMF_RESP_FUNC_FAILED;
1223}
1224
1225
1226
1227/**
1228 * isci_task_clear_task_set() - This function is one of the SAS Domain Template
 1229 *    functions. This is one of the Task Management functions called by libsas.
1230 * @d_device: This parameter specifies the domain device associated with this
1231 *    request.
 1232 * @lun: This parameter specifies the lun associated with this request.
1233 *
1234 * status, zero indicates success.
1235 */
1236int isci_task_clear_task_set(
1237        struct domain_device *d_device,
1238        u8 *lun)
1239{
1240        return TMF_RESP_FUNC_FAILED;
1241}
1242
1243
1244/**
1245 * isci_task_query_task() - This function is implemented to cause libsas to
1246 *    correctly escalate the failed abort to a LUN or target reset (this is
 1247 *    because the sas_scsi_find_task libsas function does not correctly interpret
1248 *    all return codes from the abort task call).  When TMF_RESP_FUNC_SUCC is
1249 *    returned, libsas turns this into a LUN reset; when FUNC_FAILED is
 1250 *    returned, libsas will turn this into a target reset.
1251 * @task: This parameter specifies the sas task being queried.
1252 * @lun: This parameter specifies the lun associated with this request.
1253 *
1254 * status, zero indicates success.
1255 */
1256int isci_task_query_task(
1257        struct sas_task *task)
1258{
1259        /* See if there is a pending device reset for this device. */
1260        if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
1261                return TMF_RESP_FUNC_FAILED;
1262        else
1263                return TMF_RESP_FUNC_SUCC;
1264}
1265
1266/*
1267 * isci_task_request_complete() - This function is called by the sci core when
 1268 *    a task request completes.
1269 * @ihost: This parameter specifies the ISCI host object
1270 * @ireq: This parameter is the completed isci_request object.
1271 * @completion_status: This parameter specifies the completion status from the
1272 *    sci core.
1273 *
1274 * none.
1275 */
1276void
1277isci_task_request_complete(struct isci_host *ihost,
1278                           struct isci_request *ireq,
1279                           enum sci_task_status completion_status)
1280{
1281        struct isci_tmf *tmf = isci_request_access_tmf(ireq);
1282        struct completion *tmf_complete = NULL;
1283        struct completion *request_complete = ireq->io_request_completion;
1284
1285        dev_dbg(&ihost->pdev->dev,
1286                "%s: request = %p, status=%d\n",
1287                __func__, ireq, completion_status);
1288
1289        isci_request_change_state(ireq, completed);
1290
1291        set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
1292
1293        if (tmf) {
1294                tmf->status = completion_status;
1295
1296                if (tmf->proto == SAS_PROTOCOL_SSP) {
1297                        memcpy(&tmf->resp.resp_iu,
1298                               &ireq->ssp.rsp,
1299                               SSP_RESP_IU_MAX_SIZE);
1300                } else if (tmf->proto == SAS_PROTOCOL_SATA) {
1301                        memcpy(&tmf->resp.d2h_fis,
1302                               &ireq->stp.rsp,
1303                               sizeof(struct dev_to_host_fis));
1304                }
1305                /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
1306                tmf_complete = tmf->complete;
1307        }
1308        sci_controller_complete_io(ihost, ireq->target_device, ireq);
 1309        /* set the 'terminated' flag to make sure it cannot be terminated
1310         *  or completed again.
1311         */
1312        set_bit(IREQ_TERMINATED, &ireq->flags);
1313
1314        /* As soon as something is in the terminate path, deallocation is
1315         * managed there.  Note that the final non-managed state of a task
1316         * request is "completed".
1317         */
1318        if ((ireq->status == completed) ||
1319            !isci_request_is_dealloc_managed(ireq->status)) {
1320                isci_request_change_state(ireq, unallocated);
1321                isci_free_tag(ihost, ireq->io_tag);
1322                list_del_init(&ireq->dev_node);
1323        }
1324
1325        /* "request_complete" is set if the task was being terminated. */
1326        if (request_complete)
1327                complete(request_complete);
1328
1329        /* The task management part completes last. */
1330        if (tmf_complete)
1331                complete(tmf_complete);
1332}
1333
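/* Reset the remote device: tell the core a reset is starting, hard reset the
 * phy via libsas, terminate any in-flight requests, then let the core resume
 * the remote node context.
 */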
1334static int isci_reset_device(struct isci_host *ihost,
1335                             struct isci_remote_device *idev)
1336{
1337        struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
1338        enum sci_status status;
1339        unsigned long flags;
1340        int rc;
1341
1342        dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1343
1344        spin_lock_irqsave(&ihost->scic_lock, flags);
1345        status = sci_remote_device_reset(idev);
1346        if (status != SCI_SUCCESS) {
1347                spin_unlock_irqrestore(&ihost->scic_lock, flags);
1348
1349                dev_dbg(&ihost->pdev->dev,
1350                         "%s: sci_remote_device_reset(%p) returned %d!\n",
1351                         __func__, idev, status);
1352
1353                return TMF_RESP_FUNC_FAILED;
1354        }
1355        spin_unlock_irqrestore(&ihost->scic_lock, flags);
1356
1357        rc = sas_phy_reset(phy, true);
1358
1359        /* Terminate in-progress I/O now. */
1360        isci_remote_device_nuke_requests(ihost, idev);
1361
1362        /* Since all pending TCs have been cleaned, resume the RNC. */
1363        spin_lock_irqsave(&ihost->scic_lock, flags);
1364        status = sci_remote_device_reset_complete(idev);
1365        spin_unlock_irqrestore(&ihost->scic_lock, flags);
1366
1367        if (status != SCI_SUCCESS) {
1368                dev_dbg(&ihost->pdev->dev,
1369                         "%s: sci_remote_device_reset_complete(%p) "
1370                         "returned %d!\n", __func__, idev, status);
1371        }
1372
1373        dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1374
1375        return rc;
1376}
1377
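/* libsas I_T nexus reset handler: only devices under error handling
 * (IDEV_EH set) are reset; otherwise report success without touching the
 * device.
 */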
1378int isci_task_I_T_nexus_reset(struct domain_device *dev)
1379{
1380        struct isci_host *ihost = dev_to_ihost(dev);
1381        struct isci_remote_device *idev;
1382        unsigned long flags;
1383        int ret;
1384
1385        spin_lock_irqsave(&ihost->scic_lock, flags);
1386        idev = isci_lookup_device(dev);
1387        spin_unlock_irqrestore(&ihost->scic_lock, flags);
1388
1389        if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1390                ret = TMF_RESP_FUNC_COMPLETE;
1391                goto out;
1392        }
1393
1394        ret = isci_reset_device(ihost, idev);
1395 out:
1396        isci_put_device(idev);
1397        return ret;
1398}
1399
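/* SCSI midlayer bus reset handler: look up and reset the device backing the
 * command; a device that is already gone is treated as reset complete.
 */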
1400int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1401{
1402        struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1403        struct isci_host *ihost = dev_to_ihost(dev);
1404        struct isci_remote_device *idev;
1405        unsigned long flags;
1406        int ret;
1407
1408        spin_lock_irqsave(&ihost->scic_lock, flags);
1409        idev = isci_lookup_device(dev);
1410        spin_unlock_irqrestore(&ihost->scic_lock, flags);
1411
1412        if (!idev) {
1413                ret = TMF_RESP_FUNC_COMPLETE;
1414                goto out;
1415        }
1416
1417        ret = isci_reset_device(ihost, idev);
1418 out:
1419        isci_put_device(idev);
1420        return ret;
1421}
1422