/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;


#define SG_ALL  SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

struct scsi_host_template {
        struct module *module;
        const char *name;

        /*
         * The info function will return whatever useful information the
         * developer sees fit.  If not provided, then the name field will
         * be used instead.
         *
         * Status: OPTIONAL
         */
        const char *(* info)(struct Scsi_Host *);

        /*
         * Ioctl interface
         *
         * Status: OPTIONAL
         */
        int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
                     void __user *arg);


#ifdef CONFIG_COMPAT
        /*
         * Compat handler. Handle 32bit ABI.
         * When unknown ioctl is passed return -ENOIOCTLCMD.
         *
         * Status: OPTIONAL
         */
        int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
                            void __user *arg);
#endif

        int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
        int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

        /*
         * The queuecommand function is used to queue up a scsi
         * command block to the LLDD.  When the driver has finished
         * processing the command, the done callback is invoked.
         *
         * If queuecommand returns 0, then the driver has accepted the
         * command.  It must also push it to the HBA if the scsi_cmnd
         * flag SCMD_LAST is set, or if the driver does not implement
         * commit_rqs.  The done() function must be called on the command
         * when the driver has finished with it. (you may call done on the
         * command before queuecommand returns, but in this case you
         * *must* return 0 from queuecommand).
         *
         * Queuecommand may also reject the command, in which case it may
         * not touch the command and must not call done() for it.
         *
         * There are two possible rejection returns:
         *
         *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
         *   allow commands to other devices serviced by this host.
         *
         *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
         *   host temporarily.
         *
         * For compatibility, any other non-zero return is treated the
         * same as SCSI_MLQUEUE_HOST_BUSY.
         *
         * NOTE: "temporarily" means either until the next command for
         * this device/host completes, or a period of time determined by
         * I/O pressure in the system if there are no other outstanding
         * commands.
         *
         * STATUS: REQUIRED
         */
        int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);

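        /*
         * A minimal sketch of the accept/reject contract described above,
         * for a hypothetical foo driver (all foo_* names are illustrative,
         * not part of this interface):
         *
         *	static int foo_queuecommand(struct Scsi_Host *shost,
         *				    struct scsi_cmnd *cmd)
         *	{
         *		struct foo_hba *hba = shost_priv(shost);
         *
         *		if (foo_hba_ring_full(hba))
         *			return SCSI_MLQUEUE_HOST_BUSY; // rejected, cmd untouched
         *
         *		foo_hba_submit(hba, cmd); // completion invokes cmd->scsi_done(cmd)
         *		return 0;		  // accepted
         *	}
         */
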
        /*
         * The commit_rqs function is used to trigger a hardware
         * doorbell after some requests have been queued with
         * queuecommand, when an error is encountered before sending
         * the request with SCMD_LAST set.
         *
         * STATUS: OPTIONAL
         */
        void (*commit_rqs)(struct Scsi_Host *, u16);

        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - a default routine is
         * present that should work in most cases.  For those driver authors
         * that have the inclination and ability to write their own strategy
         * routine, this is where it is specified.  Note - the strategy
         * routine is *ALWAYS* run in the context of the kernel eh
         * thread.  Thus you are guaranteed to *NOT* be in an interrupt
         * handler when you execute this, and you are also guaranteed to
         * *NOT* have any other commands being queued while you are in the
         * strategy routine. When you return from this function, operations
         * return to normal.
         *
         * See scsi_error.c scsi_unjam_host for additional comments about
         * what this function should and should not be attempting to do.
         *
         * Status: REQUIRED     (at least one of them)
         */
        int (* eh_abort_handler)(struct scsi_cmnd *);
        int (* eh_device_reset_handler)(struct scsi_cmnd *);
        int (* eh_target_reset_handler)(struct scsi_cmnd *);
        int (* eh_bus_reset_handler)(struct scsi_cmnd *);
        int (* eh_host_reset_handler)(struct scsi_cmnd *);

        /*
         * Before the mid layer attempts to scan for a new device where none
         * currently exists, it will call this entry in your driver.  Should
         * your driver need to allocate any structs or perform any other init
         * items in order to send commands to a currently unused target/lun
         * combo, then this is where you can perform those allocations.  This
         * is specifically so that drivers won't have to perform any kind of
         * "is this a new device" checks in their queuecommand routine,
         * thereby making the hot path a bit quicker.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Deallocation:  If we didn't find any devices at this ID, you will
         * get an immediate call to slave_destroy().  If we find something
         * here then you will get a call to slave_configure(), then the
         * device will be used for however long it is kept around, then when
         * the device is removed from the system (or possibly at reboot
         * time), you will then get a call to slave_destroy().  This is
         * assuming you implement slave_configure and slave_destroy.
         * However, if you allocate memory and hang it off the device struct,
         * then you must implement the slave_destroy() routine at a minimum
         * in order to avoid leaking memory each time a device is torn down.
         *
         * Status: OPTIONAL
         */
        int (* slave_alloc)(struct scsi_device *);

        /*
         * Once the device has responded to an INQUIRY and we know the
         * device is online, we call into the low level driver with the
         * struct scsi_device *.  If the low level device driver implements
         * this function, it *must* perform the task of setting the queue
         * depth on the device.  All other tasks are optional and depend
         * on what the driver supports and various implementation details.
         *
         * Things currently recommended to be handled at this time include:
         *
         * 1.  Setting the device queue depth.  Proper setting of this is
         *     described in the comments for scsi_change_queue_depth.
         * 2.  Determining if the device supports the various synchronous
         *     negotiation protocols.  The device struct will already have
         *     responded to INQUIRY and the results of the standard items
         *     will have been shoved into the various device flag bits, eg.
         *     device->sdtr will be true if the device supports SDTR messages.
         * 3.  Allocating command structs that the device will need.
         * 4.  Setting the default timeout on this device (if needed).
         * 5.  Anything else the low level driver might want to do on a device
         *     specific setup basis...
         *
         * Return 0 on success, non-0 on error.  The device will be marked
         * as offline on error so that no access will occur.  If you return
         * non-0, your slave_destroy routine will never get called for this
         * device, so don't leave any loose memory hanging around and clean
         * up after yourself before returning non-0.
         *
         * Status: OPTIONAL
         */
        int (* slave_configure)(struct scsi_device *);
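
        /*
         * A hedged sketch of a typical slave_configure for a hypothetical
         * foo driver; FOO_MAX_QUEUE_DEPTH is an assumed driver constant:
         *
         *	static int foo_slave_configure(struct scsi_device *sdev)
         *	{
         *		// Mandatory part: set the per-device queue depth.
         *		scsi_change_queue_depth(sdev, FOO_MAX_QUEUE_DEPTH);
         *		return 0;	// non-0 would mark the device offline
         *	}
         */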

        /*
         * Immediately prior to deallocating the device and after all activity
         * has ceased the mid layer calls this point so that the low level
         * driver may completely detach itself from the scsi device and vice
         * versa.  The low level driver is responsible for freeing any memory
         * it allocated in the slave_alloc or slave_configure calls.
         *
         * Status: OPTIONAL
         */
        void (* slave_destroy)(struct scsi_device *);

        /*
         * Before the mid layer attempts to scan for a new device attached
         * to a target where no target currently exists, it will call this
         * entry in your driver.  Should your driver need to allocate any
         * structs or perform any other init items in order to send commands
         * to a currently unused target, then this is where you can perform
         * those allocations.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Status: OPTIONAL
         */
        int (* target_alloc)(struct scsi_target *);

        /*
         * Immediately prior to deallocating the target structure, and
         * after all activity to attached scsi devices has ceased, the
         * midlayer calls this point so that the driver may deallocate
         * and terminate any references to the target.
         *
         * Status: OPTIONAL
         */
        void (* target_destroy)(struct scsi_target *);

        /*
         * If a host has the ability to discover targets on its own instead
         * of scanning the entire bus, it can fill in this function and
         * call scsi_scan_host().  It will then be called periodically,
         * with the scsi_host and the elapsed time of the scan in jiffies,
         * until it returns 1 to indicate that the scan has finished.
         *
         * Status: OPTIONAL
         */
        int (* scan_finished)(struct Scsi_Host *, unsigned long);

        /*
         * If the host wants to be called before the scan starts, but
         * after the midlayer has set up ready for the scan, it can fill
         * in this function.
         *
         * Status: OPTIONAL
         */
        void (* scan_start)(struct Scsi_Host *);

        /*
         * Fill in this function to allow the queue depth of this host
         * to be changeable (on a per device basis).  Returns either
         * the current queue depth setting (may be different from what
         * was passed in) or an error.  An error should only be
         * returned if the requested depth is legal but the driver was
         * unable to set it.  If the requested depth is illegal, the
         * driver should set and return the closest legal queue depth.
         *
         * Status: OPTIONAL
         */
        int (* change_queue_depth)(struct scsi_device *, int);
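
        /*
         * A minimal sketch: most implementations simply clamp the request
         * and delegate to scsi_change_queue_depth() (foo_* is hypothetical):
         *
         *	static int foo_change_queue_depth(struct scsi_device *sdev,
         *					  int depth)
         *	{
         *		if (depth > FOO_MAX_QUEUE_DEPTH)
         *			depth = FOO_MAX_QUEUE_DEPTH; // closest legal depth
         *		return scsi_change_queue_depth(sdev, depth);
         *	}
         */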

        /*
         * This function lets the driver expose the queue mapping
         * to the block layer.
         *
         * Status: OPTIONAL
         */
        int (* map_queues)(struct Scsi_Host *shost);

        /*
         * Check if scatterlists need to be padded for DMA draining.
         *
         * Status: OPTIONAL
         */
        bool (* dma_need_drain)(struct request *rq);

        /*
         * This function determines the BIOS parameters for a given
         * hard disk.  These tend to be numbers that are made up by
         * the host adapter.  Parameters:
         * size, device, list (heads, sectors, cylinders)
         *
         * Status: OPTIONAL
         */
        int (* bios_param)(struct scsi_device *, struct block_device *,
                        sector_t, int []);

        /*
         * This function is called when one or more partitions on the
         * device reach beyond the end of the device.
         *
         * Status: OPTIONAL
         */
        void (*unlock_native_capacity)(struct scsi_device *);

        /*
         * Can be used to export driver statistics and other infos to the
         * world outside the kernel, i.e. userspace, and it also provides an
         * interface to feed the driver with information.
         *
         * Status: OBSOLETE
         */
        int (*show_info)(struct seq_file *, struct Scsi_Host *);
        int (*write_info)(struct Scsi_Host *, char *, int);

        /*
         * This is an optional routine that allows the transport to become
         * involved when a scsi io timer fires. The return value tells the
         * timer routine how to finish the io timeout handling.
         *
         * Status: OPTIONAL
         */
        enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

        /*
         * This is an optional routine that allows the transport to initiate
         * a LLD adapter or firmware reset using a sysfs attribute.
         *
         * Return values: 0 on success, -ve value on failure.
         *
         * Status: OPTIONAL
         */
        int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET      1
#define SCSI_FIRMWARE_RESET     2

        /*
         * Name of proc directory
         */
        const char *proc_name;

        /*
         * Used to store the procfs directory if a driver implements the
         * show_info method.
         */
        struct proc_dir_entry *proc_dir;

        /*
         * This determines if we will use a non-interrupt driven
         * or an interrupt driven scheme.  It is set to the maximum number
         * of simultaneous commands a single hw queue in the HBA will accept.
         */
        int can_queue;

        /*
         * In many instances, especially where disconnect / reconnect are
         * supported, our host also has an ID on the SCSI bus.  If this is
         * the case, then it must be reserved.  Please set this_id to -1 if
         * your setup is in single initiator mode, and the host lacks an
         * ID.
         */
        int this_id;

        /*
         * This determines the degree to which the host adapter is capable
         * of scatter-gather.
         */
        unsigned short sg_tablesize;
        unsigned short sg_prot_tablesize;

        /*
         * Set this if the host adapter has limitations besides the segment
         * count.
         */
        unsigned int max_sectors;

        /*
         * Maximum size in bytes of a single segment.
         */
        unsigned int max_segment_size;

        /*
         * DMA scatter gather segment boundary limit. A segment crossing this
         * boundary will be split in two.
         */
        unsigned long dma_boundary;

        unsigned long virt_boundary_mask;

        /*
         * This specifies "machine infinity" for host templates which don't
         * limit the transfer size.  Note this limit represents an absolute
         * maximum, and may exceed the transfer limits allowed for
         * individual devices (e.g. 256 for SCSI-1).
         */
#define SCSI_DEFAULT_MAX_SECTORS        1024

        /*
         * True if this host adapter can make good use of linked commands.
         * This will allow more than one command to be queued to a given
         * unit on a given host.  Set this to the maximum number of command
         * blocks to be provided for each device.  Set this to 1 for one
         * command block per lun, 2 for two, etc.  Do not set this to 0.
         * You should make sure that the host adapter will do the right thing
         * before you try setting this above 1.
         */
        short cmd_per_lun;

        /*
         * present contains a counter indicating how many boards of this
         * type were found when we did the scan.
         */
        unsigned char present;

        /*
         * If the block layer is used to manage tags, this is the tag
         * allocation policy.
         */
        int tag_alloc_policy;

        /*
         * Track QUEUE_FULL events and reduce queue depth on demand.
         */
        unsigned track_queue_depth:1;

        /*
         * This specifies the mode that a LLD supports.
         */
        unsigned supported_mode:2;

        /*
         * True if this host adapter uses unchecked DMA onto an ISA bus.
         */
        unsigned unchecked_isa_dma:1;

        /*
         * True for emulated SCSI host adapters (e.g. ATAPI).
         */
        unsigned emulated:1;

        /*
         * True if the low-level driver performs its own reset-settle delays.
         */
        unsigned skip_settle_delay:1;

        /* True if the controller does not support WRITE SAME */
        unsigned no_write_same:1;

        /* True if the host uses a host-wide tagspace */
        unsigned host_tagset:1;

        /*
         * Countdown for host blocking with no commands outstanding.
         */
        unsigned int max_host_blocked;

        /*
         * Default value for the blocking.  If the queue is empty,
         * host_blocked counts down in the request_fn until host operations
         * restart when zero is reached.
         *
         * FIXME: This should probably be a value in the template
         */
#define SCSI_DEFAULT_HOST_BLOCKED       7

        /*
         * Pointer to the sysfs class properties for this host, NULL terminated.
         */
        struct device_attribute **shost_attrs;

        /*
         * Pointer to the SCSI device properties for this host, NULL terminated.
         */
        struct device_attribute **sdev_attrs;

        /*
         * Pointer to the SCSI device attribute groups for this host,
         * NULL terminated.
         */
        const struct attribute_group **sdev_groups;

        /*
         * Vendor Identifier associated with the host
         *
         * Note: When specifying vendor_id, be sure to read the
         *   Vendor Type and ID formatting requirements specified in
         *   scsi_netlink.h
         */
        u64 vendor_id;

        /*
         * Additional per-command data allocated for the driver.
         */
        unsigned int cmd_size;
        struct scsi_host_cmd_pool *cmd_pool;

        /* Delay for runtime autosuspend */
        int rpm_autosuspend_delay;
};
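
/*
 * A hedged sketch of a minimal template definition for a hypothetical foo
 * driver (all foo_* symbols are illustrative):
 *
 *	static struct scsi_host_template foo_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "foo HBA",
 *		.proc_name		= "foo",
 *		.queuecommand		= foo_queuecommand,
 *		.eh_host_reset_handler	= foo_host_reset,
 *		.slave_configure	= foo_slave_configure,
 *		.can_queue		= 64,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.cmd_per_lun		= 2,
 *		.cmd_size		= sizeof(struct foo_cmd_priv),
 *	};
 */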

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 */
#define DEF_SCSI_QCMD(func_name) \
        int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)   \
        {                                                               \
                unsigned long irq_flags;                                \
                int rc;                                                 \
                spin_lock_irqsave(shost->host_lock, irq_flags);         \
                rc = func_name##_lck(cmd, cmd->scsi_done);              \
                spin_unlock_irqrestore(shost->host_lock, irq_flags);    \
                return rc;                                              \
        }
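
/*
 * A usage sketch, assuming a legacy driver whose foo_queuecommand_lck() was
 * written to run with the host lock held (foo_* names are hypothetical):
 *
 *	static int foo_queuecommand_lck(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		// ... start the command, arrange for done(cmd) later ...
 *		return 0;
 *	}
 *
 *	static DEF_SCSI_QCMD(foo_queuecommand)
 *
 * The generated foo_queuecommand() can then be plugged into the
 * scsi_host_template .queuecommand slot.
 */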


/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ASCII descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
        SHOST_CREATED = 1,
        SHOST_RUNNING,
        SHOST_CANCEL,
        SHOST_DEL,
        SHOST_RECOVERY,
        SHOST_CANCEL_RECOVERY,
        SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
        /*
         * __devices is protected by the host_lock, but you should
         * usually use scsi_device_lookup / shost_for_each_device
         * to access it, so you need not care about locking yourself.
         * In the rare case of being in irq context you can use
         * their __ prefixed variants with the lock held. NEVER
         * access this list directly from a driver.
         */
        struct list_head        __devices;
        struct list_head        __targets;
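
        /*
         * For example (a sketch; sdev is any struct scsi_device cursor):
         *
         *	struct scsi_device *sdev;
         *
         *	shost_for_each_device(sdev, shost)
         *		dev_info(&sdev->sdev_gendev, "found\n");
         */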

        struct list_head        starved_list;

        spinlock_t              default_lock;
        spinlock_t              *host_lock;

        struct mutex            scan_mutex;/* serialize scanning activity */

        struct list_head        eh_cmd_q;
        struct task_struct    * ehandler;  /* Error recovery thread. */
        struct completion     * eh_action; /* Wait for specific actions on the
                                              host. */
        wait_queue_head_t       host_wait;
        struct scsi_host_template *hostt;
        struct scsi_transport_template *transportt;

        /* Area to keep a shared tag map */
        struct blk_mq_tag_set   tag_set;

        atomic_t host_blocked;

        unsigned int host_failed;          /* commands that failed.
                                              protected by host_lock */
        unsigned int host_eh_scheduled;    /* EH scheduled without command */

        unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

        /* next two fields are used to bound the time spent in error handling */
        int eh_deadline;
        unsigned long last_reset;


        /*
         * These three parameters can be used to allow for wide scsi,
         * and for host adapters that support multiple busses.
         * The last two should be set to 1 more than the actual max id
         * or lun (e.g. 8 for SCSI parallel systems).
         */
        unsigned int max_channel;
        unsigned int max_id;
        u64 max_lun;

        /*
         * This is a unique identifier that must be assigned so that we
         * have some way of identifying each detected host adapter properly
         * and uniquely.  For hosts that do not support more than one card
         * in the system at one time, this does not need to be set.  It is
         * initialized to 0 in scsi_register.
         */
        unsigned int unique_id;

        /*
         * The maximum length of SCSI commands that this host can accept.
         * Probably 12 for most host adapters, but could be 16 for others,
         * or 260 if the driver supports variable-length CDBs.
         * For drivers that don't set this field, a value of 12 is
         * assumed.
         */
        unsigned short max_cmd_len;

        int this_id;
        int can_queue;
        short cmd_per_lun;
        short unsigned int sg_tablesize;
        short unsigned int sg_prot_tablesize;
        unsigned int max_sectors;
        unsigned int max_segment_size;
        unsigned long dma_boundary;
        unsigned long virt_boundary_mask;
        /*
         * In scsi-mq mode, the number of hardware queues supported by the LLD.
         *
         * Note: it is assumed that each hardware queue has a queue depth of
         * can_queue. In other words, the total queue depth per host
         * is nr_hw_queues * can_queue. However, when host_tagset is set,
         * the total queue depth is can_queue.
         */
        unsigned nr_hw_queues;
        unsigned active_mode:2;
        unsigned unchecked_isa_dma:1;

        /*
         * Host has requested that no further requests come through for the
         * time being.
         */
        unsigned host_self_blocked:1;

        /*
         * Host uses correct SCSI ordering, not PC ordering. The bit is
         * set for the minority of drivers whose authors actually read
         * the spec ;).
         */
        unsigned reverse_ordering:1;

        /* Task mgmt function in progress */
        unsigned tmf_in_progress:1;

        /* Asynchronous scan in progress */
        unsigned async_scan:1;

        /* Don't resume host in EH */
        unsigned eh_noresume:1;

        /* The controller does not support WRITE SAME */
        unsigned no_write_same:1;

        /* True if the host uses a host-wide tagspace */
        unsigned host_tagset:1;

        /* Host responded with short (<36 bytes) INQUIRY result */
        unsigned short_inquiry:1;

        /* The transport requires the LUN bits NOT to be stored in CDB[1] */
        unsigned no_scsi2_lun_in_cdb:1;

        /*
         * Optional work queue to be utilized by the transport
         */
        char work_q_name[20];
        struct workqueue_struct *work_q;

        /*
         * Task management function work queue
         */
        struct workqueue_struct *tmf_work_q;

        /*
         * Value host_blocked counts down from
         */
        unsigned int max_host_blocked;

        /* Protection Information */
        unsigned int prot_capabilities;
        unsigned char prot_guard_type;

        /* legacy crap */
        unsigned long base;
        unsigned long io_port;
        unsigned char n_io_port;
        unsigned char dma_channel;
        unsigned int  irq;


        enum scsi_host_state shost_state;

        /* ldm bits */
        struct device           shost_gendev, shost_dev;

        /*
         * Points to the transport data (if any) which is allocated
         * separately
         */
        void *shost_data;

        /*
         * Points to the physical bus device we'd use to do DMA
         * Needed just in case we have virtual hosts.
         */
        struct device *dma_dev;

        /*
         * We should ensure that this is aligned, both for better performance
         * and also because some compilers (m68k) don't automatically force
         * alignment to a long boundary.
         */
        unsigned long hostdata[]  /* Used for storage of host specific stuff */
                __attribute__ ((aligned (sizeof(unsigned long))));
};

#define         class_to_shost(d)       \
        container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)  \
        dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
        return (void *)shost->hostdata;
}
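
/*
 * hostdata[] is sized by the second argument of scsi_host_alloc(), so a
 * driver's private data is carved out of the Scsi_Host allocation itself.
 * A sketch (struct foo_hba is hypothetical):
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	hba = shost_priv(shost);	// points into shost->hostdata[]
 */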

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
        while (!scsi_is_host_device(dev)) {
                if (!dev->parent)
                        return NULL;
                dev = dev->parent;
        }
        return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RECOVERY ||
                shost->shost_state == SHOST_CANCEL_RECOVERY ||
                shost->shost_state == SHOST_DEL_RECOVERY ||
                shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
                                               struct device *,
                                               struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
                                            int status);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
                                             struct device *dev)
{
        return scsi_add_host_with_dma(host, dev, dev);
}
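
/*
 * The usual host lifecycle, sketched for a hypothetical PCI driver (error
 * handling omitted; pdev is the driver's struct pci_dev):
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	ret = scsi_add_host(shost, &pdev->dev);	// register with the midlayer
 *	scsi_scan_host(shost);			// probe for devices
 *	...
 *	scsi_remove_host(shost);		// on teardown
 *	scsi_host_put(shost);			// drop the allocation reference
 */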

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
        return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:      Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RUNNING ||
               shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
extern int scsi_host_block(struct Scsi_Host *shost);
extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);

void scsi_host_busy_iter(struct Scsi_Host *,
                         bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);

struct class_container;

/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
        SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
        SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
        SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

        SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
        SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
        SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
        SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
        shost->prot_capabilities = mask;
}
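
/*
 * For example, a host supporting DIF Type 1 plus DIX with Type 1 would set
 * (a sketch):
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 */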

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
        return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
        return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
        static unsigned char cap[] = { 0,
                                       SHOST_DIF_TYPE1_PROTECTION,
                                       SHOST_DIF_TYPE2_PROTECTION,
                                       SHOST_DIF_TYPE3_PROTECTION };

        if (target_type >= ARRAY_SIZE(cap))
                return 0;

        return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
                                       SHOST_DIX_TYPE1_PROTECTION,
                                       SHOST_DIX_TYPE2_PROTECTION,
                                       SHOST_DIX_TYPE3_PROTECTION };

        if (target_type >= ARRAY_SIZE(cap))
                return 0;

        return shost->prot_capabilities & cap[target_type];
#endif
        return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme, which has a much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
        SHOST_DIX_GUARD_CRC = 1 << 0,
        SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
        shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
        return shost->prot_guard_type;
}
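
/*
 * A controller that can compute both guard types might advertise (a sketch):
 *
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP);
 */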

extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */