linux/drivers/scsi/libsas/sas_init.c
/*
 * Serial Attached SCSI (SAS) Transport Layer initialization
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>

#include "sas_internal.h"

#include "../scsi_sas_internal.h"

static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;

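/**
 * sas_alloc_task - allocate a zeroed sas_task from the task cache
 * @flags: gfp allocation flags
 *
 * The returned task has its state lock initialized and is marked
 * SAS_TASK_STATE_PENDING; callers fill in the protocol-specific fields.
 */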
struct sas_task *sas_alloc_task(gfp_t flags)
{
        struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);

        if (task) {
                spin_lock_init(&task->task_state_lock);
                task->task_state_flags = SAS_TASK_STATE_PENDING;
        }

        return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_task);

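/**
 * sas_alloc_slow_task - allocate a sas_task with slow-path bookkeeping
 * @flags: gfp allocation flags
 *
 * Besides the task itself, this allocates the sas_task_slow structure
 * (timer and completion) that libsas and LLDDs use for internally
 * generated commands such as TMFs and SMP requests.
 */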
struct sas_task *sas_alloc_slow_task(gfp_t flags)
{
        struct sas_task *task = sas_alloc_task(flags);
        struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);

        if (!task || !slow) {
                if (task)
                        kmem_cache_free(sas_task_cache, task);
                kfree(slow);
                return NULL;
        }

        task->slow_task = slow;
        slow->task = task;
        timer_setup(&slow->timer, NULL, 0);
        init_completion(&slow->completion);

        return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_slow_task);

void sas_free_task(struct sas_task *task)
{
        if (task) {
                kfree(task->slow_task);
                kmem_cache_free(sas_task_cache, task);
        }
}
EXPORT_SYMBOL_GPL(sas_free_task);

/*------------ SAS addr hash -----------*/
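/**
 * sas_hash_addr - hash an 8-byte SAS address down to 3 bytes
 * @hashed: buffer receiving the 24-bit hashed address
 * @sas_addr: the world-wide SAS address to hash
 *
 * Folds the address into a 24-bit value with the polynomial 0x00DB2777,
 * consuming each address byte most-significant bit first.
 */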
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
{
        const u32 poly = 0x00DB2777;
        u32     r = 0;
        int     i;

        for (i = 0; i < 8; i++) {
                int b;
                for (b = 7; b >= 0; b--) {
                        r <<= 1;
                        if ((1 << b) & sas_addr[i]) {
                                if (!(r & 0x01000000))
                                        r ^= poly;
                        } else if (r & 0x01000000)
                                r ^= poly;
                }
        }

        hashed[0] = (r >> 16) & 0xFF;
        hashed[1] = (r >> 8) & 0xFF;
        hashed[2] = r & 0xFF;
}

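/**
 * sas_register_ha - register an LLDD's host adapter with libsas
 * @sas_ha: host adapter structure filled in by the LLDD
 *
 * Initializes the HA's locks, lists, and event state, hashes the SAS
 * address, registers the phys and ports, and creates the per-HA event
 * and discovery workqueues.  Any step that fails unwinds the steps that
 * preceded it.
 */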
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
        char name[64];
        int error = 0;

        mutex_init(&sas_ha->disco_mutex);
        spin_lock_init(&sas_ha->phy_port_lock);
        sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);

        set_bit(SAS_HA_REGISTERED, &sas_ha->state);
        spin_lock_init(&sas_ha->lock);
        mutex_init(&sas_ha->drain_mutex);
        init_waitqueue_head(&sas_ha->eh_wait_q);
        INIT_LIST_HEAD(&sas_ha->defer_q);
        INIT_LIST_HEAD(&sas_ha->eh_dev_q);

        sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;

        error = sas_register_phys(sas_ha);
        if (error) {
                printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
                return error;
        }

        error = sas_register_ports(sas_ha);
        if (error) {
                printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
                goto Undo_phys;
        }

        error = sas_init_events(sas_ha);
        if (error) {
                printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
                goto Undo_ports;
        }

        error = -ENOMEM;
        snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
        sas_ha->event_q = create_singlethread_workqueue(name);
        if (!sas_ha->event_q)
                goto Undo_ports;

        snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
        sas_ha->disco_q = create_singlethread_workqueue(name);
        if (!sas_ha->disco_q)
                goto Undo_event_q;

        INIT_LIST_HEAD(&sas_ha->eh_done_q);
        INIT_LIST_HEAD(&sas_ha->eh_ata_q);

        return 0;

Undo_event_q:
        destroy_workqueue(sas_ha->event_q);
Undo_ports:
        sas_unregister_ports(sas_ha);
Undo_phys:

        return error;
}

static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
        /* Set the state to unregistered to prevent further unchained
         * events from being queued, and flush any in-progress drainers
         */
        mutex_lock(&sas_ha->drain_mutex);
        spin_lock_irq(&sas_ha->lock);
        clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
        spin_unlock_irq(&sas_ha->lock);
        __sas_drain_work(sas_ha);
        mutex_unlock(&sas_ha->drain_mutex);
}

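/**
 * sas_unregister_ha - tear down a previously registered host adapter
 * @sas_ha: host adapter structure
 *
 * Stops event processing, unregisters the ports, drains any pending
 * unregistration work, and destroys the per-HA workqueues.
 */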
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
        sas_disable_events(sas_ha);
        sas_unregister_ports(sas_ha);

        /* flush unregistration work */
        mutex_lock(&sas_ha->drain_mutex);
        __sas_drain_work(sas_ha);
        mutex_unlock(&sas_ha->drain_mutex);

        destroy_workqueue(sas_ha->disco_q);
        destroy_workqueue(sas_ha->event_q);

        return 0;
}

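/*
 * Link error counters: a phy on the host adapter itself is queried
 * through the LLDD's lldd_control_phy(PHY_FUNC_GET_EVENTS); an expander
 * phy is queried over SMP via sas_smp_get_phy_events().
 */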
static int sas_get_linkerrors(struct sas_phy *phy)
{
        if (scsi_is_sas_phy_local(phy)) {
                struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
                struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
                struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
                struct sas_internal *i =
                        to_sas_internal(sas_ha->core.shost->transportt);

                return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
        }

        return sas_smp_get_phy_events(phy);
}

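/**
 * sas_try_ata_reset - route a user-requested link reset through libata
 * @asd_phy: phy whose link should be reset
 *
 * If the device attached to the phy's port has been probed and is a
 * SATA device, schedule a libata EH reset and wait for EH to finish.
 * Returns 0 on success and -ENODEV when there is no suitable ATA device.
 */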
int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
{
        struct domain_device *dev = NULL;

        /* try to route user requested link resets through libata */
        if (asd_phy->port)
                dev = asd_phy->port->port_dev;

        /* validate that dev has been probed */
        if (dev)
                dev = sas_find_dev_by_rphy(dev->rphy);

        if (dev && dev_is_sata(dev)) {
                sas_ata_schedule_reset(dev);
                sas_ata_wait_eh(dev);
                return 0;
        }

        return -ENODEV;
}

/*
 * transport_sas_phy_reset - reset a phy and permit libata to manage the link
 *
 * Phy reset requests arrive via sysfs and run in host workqueue context,
 * so we know we can block on EH and safely traverse the domain_device
 * topology.
 */
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
        enum phy_func reset_type;

        if (hard_reset)
                reset_type = PHY_FUNC_HARD_RESET;
        else
                reset_type = PHY_FUNC_LINK_RESET;

        if (scsi_is_sas_phy_local(phy)) {
                struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
                struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
                struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
                struct sas_internal *i =
                        to_sas_internal(sas_ha->core.shost->transportt);

                if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
                        return 0;
                return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
        } else {
                struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
                struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
                struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);

                if (ata_dev && !hard_reset) {
                        sas_ata_schedule_reset(ata_dev);
                        sas_ata_wait_eh(ata_dev);
                        return 0;
                } else
                        return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
        }
}

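/*
 * Enabling a phy is implemented as a link reset (routed through libata
 * when possible); disabling goes straight to the LLDD for a local phy,
 * or to an SMP PHY CONTROL request with PHY_FUNC_DISABLE for an
 * expander phy.
 */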
static int sas_phy_enable(struct sas_phy *phy, int enable)
{
        int ret;
        enum phy_func cmd;

        if (enable)
                cmd = PHY_FUNC_LINK_RESET;
        else
                cmd = PHY_FUNC_DISABLE;

        if (scsi_is_sas_phy_local(phy)) {
                struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
                struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
                struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
                struct sas_internal *i =
                        to_sas_internal(sas_ha->core.shost->transportt);

                if (enable)
                        ret = transport_sas_phy_reset(phy, 0);
                else
                        ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
        } else {
                struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
                struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

                if (enable)
                        ret = transport_sas_phy_reset(phy, 0);
                else
                        ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
        }
        return ret;
}

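/**
 * sas_phy_reset - issue a link or hard reset on an enabled phy
 * @phy: transport-class phy object
 * @hard_reset: non-zero for a hard reset, zero for a link reset
 *
 * Unlike the sysfs path, this variant does not route SATA links through
 * libata: local phys go directly to the LLDD and expander phys to an
 * SMP PHY CONTROL request.  Returns -ENODEV if the phy is disabled.
 */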
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
        int ret;
        enum phy_func reset_type;

        if (!phy->enabled)
                return -ENODEV;

        if (hard_reset)
                reset_type = PHY_FUNC_HARD_RESET;
        else
                reset_type = PHY_FUNC_LINK_RESET;

        if (scsi_is_sas_phy_local(phy)) {
                struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
                struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
                struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
                struct sas_internal *i =
                        to_sas_internal(sas_ha->core.shost->transportt);

                ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
        } else {
                struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
                struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
                ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
        }
        return ret;
}

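/*
 * Validate the requested minimum/maximum link rates against each other
 * and against the phy's current limits, clamp them to what the hardware
 * supports, then program them through the LLDD or, for an expander phy,
 * via an SMP PHY CONTROL (LINK RESET) request carrying the new rates.
 */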
int sas_set_phy_speed(struct sas_phy *phy,
                      struct sas_phy_linkrates *rates)
{
        int ret;

        if ((rates->minimum_linkrate &&
             rates->minimum_linkrate > phy->maximum_linkrate) ||
            (rates->maximum_linkrate &&
             rates->maximum_linkrate < phy->minimum_linkrate))
                return -EINVAL;

        if (rates->minimum_linkrate &&
            rates->minimum_linkrate < phy->minimum_linkrate_hw)
                rates->minimum_linkrate = phy->minimum_linkrate_hw;

        if (rates->maximum_linkrate &&
            rates->maximum_linkrate > phy->maximum_linkrate_hw)
                rates->maximum_linkrate = phy->maximum_linkrate_hw;

        if (scsi_is_sas_phy_local(phy)) {
                struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
                struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
                struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
                struct sas_internal *i =
                        to_sas_internal(sas_ha->core.shost->transportt);

                ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
                                               rates);
        } else {
                struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
                struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
                ret = sas_smp_phy_control(ddev, phy->number,
                                          PHY_FUNC_LINK_RESET, rates);
        }

        return ret;
}

void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
        int i;

        set_bit(SAS_HA_REGISTERED, &ha->state);

        /* clear out any stale link events/data from the suspension path */
        for (i = 0; i < ha->num_phys; i++) {
                struct asd_sas_phy *phy = ha->sas_phy[i];

                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
                phy->frame_rcvd_size = 0;
        }
}
EXPORT_SYMBOL(sas_prep_resume_ha);

static int phys_suspended(struct sas_ha_struct *ha)
{
        int i, rc = 0;

        for (i = 0; i < ha->num_phys; i++) {
                struct asd_sas_phy *phy = ha->sas_phy[i];

                if (phy->suspended)
                        rc++;
        }

        return rc;
}

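/**
 * sas_resume_ha - bring a host adapter back up after resume
 * @ha: host adapter structure
 *
 * Waits up to 25 seconds for the LLDD to report its phys as resumed,
 * posts PHYE_RESUME_TIMEOUT for any phy that did not come back, then
 * unblocks I/O and drains the libsas workqueue so missing disks get
 * flushed out.
 */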
void sas_resume_ha(struct sas_ha_struct *ha)
{
        const unsigned long tmo = msecs_to_jiffies(25000);
        int i;

        /* deform ports on phys that did not resume
         * at this point we may be racing the phy coming back (as posted
         * by the lldd), so we post the event and, once we are in the
         * libsas context, check that the phy remains suspended before
         * tearing it down.
         */
        i = phys_suspended(ha);
        if (i)
                dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
                         i, i > 1 ? "s" : "");
        wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
        for (i = 0; i < ha->num_phys; i++) {
                struct asd_sas_phy *phy = ha->sas_phy[i];

                if (phy->suspended) {
                        dev_warn(&phy->phy->dev, "resume timeout\n");
                        sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
                }
        }

        /* all phys are back up or timed out, turn on i/o so we can
         * flush out disks that did not return
         */
        scsi_unblock_requests(ha->core.shost);
        sas_drain_work(ha);
}
EXPORT_SYMBOL(sas_resume_ha);

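/**
 * sas_suspend_ha - quiesce a host adapter before suspend
 * @ha: host adapter structure
 *
 * Disables event processing, blocks new I/O, posts a DISCE_SUSPEND
 * discovery event for every port, and drains the resulting work while
 * the HA is unregistered.
 */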
void sas_suspend_ha(struct sas_ha_struct *ha)
{
        int i;

        sas_disable_events(ha);
        scsi_block_requests(ha->core.shost);
        for (i = 0; i < ha->num_phys; i++) {
                struct asd_sas_port *port = ha->sas_port[i];

                sas_discover_event(port, DISCE_SUSPEND);
        }

        /* flush suspend events while unregistered */
        mutex_lock(&ha->drain_mutex);
        __sas_drain_work(ha);
        mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);

static void sas_phy_release(struct sas_phy *phy)
{
        kfree(phy->hostdata);
        phy->hostdata = NULL;
}

static void phy_reset_work(struct work_struct *work)
{
        struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);

        d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}

static void phy_enable_work(struct work_struct *work)
{
        struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);

        d->enable_result = sas_phy_enable(d->phy, d->enable);
}

static int sas_phy_setup(struct sas_phy *phy)
{
        struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return -ENOMEM;

        mutex_init(&d->event_lock);
        INIT_SAS_WORK(&d->reset_work, phy_reset_work);
        INIT_SAS_WORK(&d->enable_work, phy_enable_work);
        d->phy = phy;
        phy->hostdata = d;

        return 0;
}

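/*
 * Sysfs phy_reset/phy_enable requests are not executed inline: the
 * request is queued onto the libsas workqueue and drained under the
 * per-phy event_lock, so it serializes with ata-eh and discovery
 * instead of racing them.
 */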
static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
        struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
        struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
        struct sas_phy_data *d = phy->hostdata;
        int rc;

        if (!d)
                return -ENOMEM;

        /* libsas workqueue coordinates ata-eh reset with discovery */
        mutex_lock(&d->event_lock);
        d->reset_result = 0;
        d->hard_reset = hard_reset;

        spin_lock_irq(&ha->lock);
        sas_queue_work(ha, &d->reset_work);
        spin_unlock_irq(&ha->lock);

        rc = sas_drain_work(ha);
        if (rc == 0)
                rc = d->reset_result;
        mutex_unlock(&d->event_lock);

        return rc;
}

static int queue_phy_enable(struct sas_phy *phy, int enable)
{
        struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
        struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
        struct sas_phy_data *d = phy->hostdata;
        int rc;

        if (!d)
                return -ENOMEM;

        /* libsas workqueue coordinates ata-eh reset with discovery */
        mutex_lock(&d->event_lock);
        d->enable_result = 0;
        d->enable = enable;

        spin_lock_irq(&ha->lock);
        sas_queue_work(ha, &d->enable_work);
        spin_unlock_irq(&ha->lock);

        rc = sas_drain_work(ha);
        if (rc == 0)
                rc = d->enable_result;
        mutex_unlock(&d->event_lock);

        return rc;
}

static struct sas_function_template sft = {
        .phy_enable = queue_phy_enable,
        .phy_reset = queue_phy_reset,
        .phy_setup = sas_phy_setup,
        .phy_release = sas_phy_release,
        .set_phy_speed = sas_set_phy_speed,
        .get_linkerrors = sas_get_linkerrors,
        .smp_handler = sas_smp_handler,
};

static inline ssize_t phy_event_threshold_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
}

static inline ssize_t phy_event_threshold_store(struct device *dev,
                        struct device_attribute *attr,
                        const char *buf, size_t count)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        sha->event_thres = simple_strtol(buf, NULL, 10);

        /* threshold cannot be set too small */
        if (sha->event_thres < 32)
                sha->event_thres = 32;

        return count;
}

DEVICE_ATTR(phy_event_threshold,
        S_IRUGO|S_IWUSR,
        phy_event_threshold_show,
        phy_event_threshold_store);
EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);

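/**
 * sas_domain_attach_transport - allocate the SAS transport template for an LLDD
 * @dft: the LLDD's domain function template (its lldd_* callbacks)
 *
 * Wraps sas_attach_transport() with the phy/port function template
 * above and installs the libsas error-handling strategy.
 *
 * Illustrative usage only; the LLDD template and host-private names
 * below are made up:
 *
 *	stt = sas_domain_attach_transport(&my_lldd_dft);
 *	shost = scsi_host_alloc(&my_lldd_sht, sizeof(struct my_lldd_ha));
 *	shost->transportt = stt;
 *	followed by scsi_add_host() and sas_register_ha().
 */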
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
        struct scsi_transport_template *stt = sas_attach_transport(&sft);
        struct sas_internal *i;

        if (!stt)
                return stt;

        i = to_sas_internal(stt);
        i->dft = dft;
        stt->create_work_queue = 1;
        stt->eh_strategy_handler = sas_scsi_recover_host;

        return stt;
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);

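/**
 * sas_alloc_event - allocate an asd_sas_event for a phy
 * @phy: phy the event will be posted against
 *
 * Accounts the event against the phy's event counter.  If the counter
 * exceeds the HA's threshold, either schedule a PHYE_SHUTDOWN for the
 * phy (when the LLDD implements phy control) or refuse the allocation.
 */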
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
{
        struct asd_sas_event *event;
        gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
        struct sas_ha_struct *sas_ha = phy->ha;
        struct sas_internal *i =
                to_sas_internal(sas_ha->core.shost->transportt);

        event = kmem_cache_zalloc(sas_event_cache, flags);
        if (!event)
                return NULL;

        atomic_inc(&phy->event_nr);

        if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
                if (i->dft->lldd_control_phy) {
                        if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
                                sas_printk("The phy%02d bursting events, shut it down.\n",
                                        phy->id);
                                sas_notify_phy_event(phy, PHYE_SHUTDOWN);
                        }
                } else {
                        /* Do not support PHY control, stop allocating events */
                        WARN_ONCE(1, "PHY control not supported.\n");
                        kmem_cache_free(sas_event_cache, event);
                        atomic_dec(&phy->event_nr);
                        event = NULL;
                }
        }

        return event;
}

void sas_free_event(struct asd_sas_event *event)
{
        struct asd_sas_phy *phy = event->phy;

        kmem_cache_free(sas_event_cache, event);
        atomic_dec(&phy->event_nr);
}

/* ---------- SAS Class register/unregister ---------- */

static int __init sas_class_init(void)
{
        sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
        if (!sas_task_cache)
                goto out;

        sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
        if (!sas_event_cache)
                goto free_task_kmem;

        return 0;
free_task_kmem:
        kmem_cache_destroy(sas_task_cache);
out:
        return -ENOMEM;
}

static void __exit sas_class_exit(void)
{
        kmem_cache_destroy(sas_task_cache);
        kmem_cache_destroy(sas_event_cache);
}

MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
MODULE_DESCRIPTION("SAS Transport Layer");
MODULE_LICENSE("GPL v2");

module_init(sas_class_init);
module_exit(sas_class_exit);

EXPORT_SYMBOL_GPL(sas_register_ha);
EXPORT_SYMBOL_GPL(sas_unregister_ha);