linux/include/scsi/scsi_host.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;

#define SG_ALL  SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

struct scsi_host_template {
        /*
         * Put fields referenced in the I/O submission path together in
         * the same cacheline.
         */

        /*
         * Additional per-command data allocated for the driver.
         */
        unsigned int cmd_size;

        /*
         * The queuecommand function is used to queue up a scsi
         * command block to the LLDD.  When the driver has finished
         * processing the command the done callback is invoked.
         *
         * If queuecommand returns 0, then the driver has accepted the
         * command.  It must also push it to the HBA if the scsi_cmnd
         * flag SCMD_LAST is set, or if the driver does not implement
         * commit_rqs.  The done() function must be called on the command
         * when the driver has finished with it.  (You may call done on
         * the command before queuecommand returns, but in this case you
         * *must* return 0 from queuecommand.)
         *
         * Queuecommand may also reject the command, in which case it may
         * not touch the command and must not call done() for it.
         *
         * There are two possible rejection returns:
         *
         *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
         *   allow commands to other devices serviced by this host.
         *
         *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
         *   host temporarily.
         *
         * For compatibility, any other non-zero return is treated the
         * same as SCSI_MLQUEUE_HOST_BUSY.
         *
         * NOTE: "temporarily" means either until the next command for
         * this device/host completes, or a period of time determined by
         * I/O pressure in the system if there are no other outstanding
         * commands.
         *
         * STATUS: REQUIRED
         */
        int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
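
        /*
         * Illustrative sketch (not part of this header; all foo_* names
         * are assumptions): a minimal queuecommand for a hypothetical LLD,
         * showing the accept and reject return paths described above.
         *
         *      static int foo_queuecommand(struct Scsi_Host *shost,
         *                                  struct scsi_cmnd *cmd)
         *      {
         *              struct foo_host *fh = shost_priv(shost);
         *
         *              if (!foo_hw_ready(fh))
         *                      return SCSI_MLQUEUE_HOST_BUSY;
         *              if (foo_hw_submit(fh, cmd))
         *                      return SCSI_MLQUEUE_DEVICE_BUSY;
         *              return 0;  // done() is invoked later from foo's IRQ path
         *      }
         */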

        /*
         * The commit_rqs function is used to trigger a hardware
         * doorbell after some requests have been queued with
         * queuecommand, when an error is encountered before sending
         * the request with SCMD_LAST set.
         *
         * STATUS: OPTIONAL
         */
        void (*commit_rqs)(struct Scsi_Host *, u16);

        struct module *module;
        const char *name;

        /*
         * The info function will return whatever useful information the
         * developer sees fit.  If not provided, then the name field will
         * be used instead.
         *
         * Status: OPTIONAL
         */
        const char *(*info)(struct Scsi_Host *);

        /*
         * Ioctl interface
         *
         * Status: OPTIONAL
         */
        int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
                     void __user *arg);

#ifdef CONFIG_COMPAT
        /*
         * Compat handler.  Handles the 32-bit ABI.
         * When an unknown ioctl is passed, return -ENOIOCTLCMD.
         *
         * Status: OPTIONAL
         */
        int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
                            void __user *arg);
#endif

        int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
        int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - there is a default
         * routine that should work in most cases.  For those driver
         * authors that have the inclination and ability to write their
         * own strategy routine, this is where it is specified.  Note - the
         * strategy routine is *ALWAYS* run in the context of the kernel eh
         * thread.  Thus you are guaranteed to *NOT* be in an interrupt
         * handler when you execute this, and you are also guaranteed to
         * *NOT* have any other commands being queued while you are in the
         * strategy routine.  When you return from this function, operations
         * return to normal.
         *
         * See scsi_error.c scsi_unjam_host for additional comments about
         * what this function should and should not be attempting to do.
         *
         * Status: REQUIRED (at least one of them)
         */
        int (*eh_abort_handler)(struct scsi_cmnd *);
        int (*eh_device_reset_handler)(struct scsi_cmnd *);
        int (*eh_target_reset_handler)(struct scsi_cmnd *);
        int (*eh_bus_reset_handler)(struct scsi_cmnd *);
        int (*eh_host_reset_handler)(struct scsi_cmnd *);
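
        /*
         * Illustrative sketch (assumption, hypothetical foo_* names): a
         * typical eh_abort_handler returns SUCCESS once the hardware has
         * given the command back and FAILED otherwise, letting the
         * midlayer escalate to the reset handlers above.
         *
         *      static int foo_eh_abort(struct scsi_cmnd *cmd)
         *      {
         *              struct foo_host *fh = shost_priv(cmd->device->host);
         *
         *              return foo_hw_abort(fh, cmd) ? FAILED : SUCCESS;
         *      }
         */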

        /*
         * Before the mid layer attempts to scan for a new device where none
         * currently exists, it will call this entry in your driver.  Should
         * your driver need to allocate any structs or perform any other init
         * items in order to send commands to a currently unused target/lun
         * combo, then this is where you can perform those allocations.  This
         * is specifically so that drivers won't have to perform any kind of
         * "is this a new device" checks in their queuecommand routine,
         * thereby making the hot path a bit quicker.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Deallocation:  If we didn't find any devices at this ID, you will
         * get an immediate call to slave_destroy().  If we find something
         * here then you will get a call to slave_configure(), then the
         * device will be used for however long it is kept around, then when
         * the device is removed from the system (or possibly at reboot
         * time), you will then get a call to slave_destroy().  This is
         * assuming you implement slave_configure and slave_destroy.
         * However, if you allocate memory and hang it off the device struct,
         * then you must implement the slave_destroy() routine at a minimum
         * in order to avoid leaking memory each time a device is torn down.
         *
         * Status: OPTIONAL
         */
        int (*slave_alloc)(struct scsi_device *);

        /*
         * Once the device has responded to an INQUIRY and we know the
         * device is online, we call into the low level driver with the
         * struct scsi_device *.  If the low level device driver implements
         * this function, it *must* perform the task of setting the queue
         * depth on the device.  All other tasks are optional and depend
         * on what the driver supports and various implementation details.
         *
         * Things currently recommended to be handled at this time include:
         *
         * 1.  Setting the device queue depth.  Proper setting of this is
         *     described in the comments for scsi_change_queue_depth.
         * 2.  Determining if the device supports the various synchronous
         *     negotiation protocols.  The device struct will already have
         *     responded to INQUIRY and the results of the standard items
         *     will have been shoved into the various device flag bits, e.g.
         *     device->sdtr will be true if the device supports SDTR messages.
         * 3.  Allocating command structs that the device will need.
         * 4.  Setting the default timeout on this device (if needed).
         * 5.  Anything else the low level driver might want to do on a
         *     device-specific setup basis...
         * 6.  Return 0 on success, non-0 on error.  The device will be marked
         *     as offline on error so that no access will occur.  If you return
         *     non-0, your slave_destroy routine will never get called for this
         *     device, so don't leave any loose memory hanging around; clean
         *     up after yourself before returning non-0.
         *
         * Status: OPTIONAL
         */
        int (*slave_configure)(struct scsi_device *);

        /*
         * Immediately prior to deallocating the device and after all activity
         * has ceased the mid layer calls this point so that the low level
         * driver may completely detach itself from the scsi device and vice
         * versa.  The low level driver is responsible for freeing any memory
         * it allocated in the slave_alloc or slave_configure calls.
         *
         * Status: OPTIONAL
         */
        void (*slave_destroy)(struct scsi_device *);
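
        /*
         * Illustrative sketch (assumption, hypothetical foo_* names) of the
         * slave_alloc/slave_configure/slave_destroy lifecycle for per-device
         * private data:
         *
         *      static int foo_slave_alloc(struct scsi_device *sdev)
         *      {
         *              sdev->hostdata = kzalloc(sizeof(struct foo_dev), GFP_KERNEL);
         *              return sdev->hostdata ? 0 : -ENOMEM;
         *      }
         *
         *      static int foo_slave_configure(struct scsi_device *sdev)
         *      {
         *              scsi_change_queue_depth(sdev, FOO_QUEUE_DEPTH);
         *              return 0;
         *      }
         *
         *      static void foo_slave_destroy(struct scsi_device *sdev)
         *      {
         *              kfree(sdev->hostdata);  // avoid leaking on teardown
         *              sdev->hostdata = NULL;
         *      }
         */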

        /*
         * Before the mid layer attempts to scan for a new device attached
         * to a target where no target currently exists, it will call this
         * entry in your driver.  Should your driver need to allocate any
         * structs or perform any other init items in order to send commands
         * to a currently unused target, then this is where you can perform
         * those allocations.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Status: OPTIONAL
         */
        int (*target_alloc)(struct scsi_target *);

        /*
         * Immediately prior to deallocating the target structure, and
         * after all activity to attached scsi devices has ceased, the
         * midlayer calls this point so that the driver may deallocate
         * and terminate any references to the target.
         *
         * Status: OPTIONAL
         */
        void (*target_destroy)(struct scsi_target *);

        /*
         * If a host has the ability to discover targets on its own instead
         * of scanning the entire bus, it can fill in this function and
         * call scsi_scan_host().  This function will be called periodically
         * with the scsi_host and the elapsed time of the scan in jiffies
         * until it returns 1.  (See the sketch below.)
         *
         * Status: OPTIONAL
         */
        int (*scan_finished)(struct Scsi_Host *, unsigned long);

        /*
         * If the host wants to be called before the scan starts, but
         * after the midlayer has set up ready for the scan, it can fill
         * in this function.
         *
         * Status: OPTIONAL
         */
        void (*scan_start)(struct Scsi_Host *);
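
        /*
         * Illustrative sketch (assumption, hypothetical foo_* names):
         * drivers that discover targets themselves typically kick off
         * discovery in scan_start() and report completion, or a timeout,
         * from scan_finished():
         *
         *      static void foo_scan_start(struct Scsi_Host *shost)
         *      {
         *              foo_fw_start_discovery(shost_priv(shost));
         *      }
         *
         *      static int foo_scan_finished(struct Scsi_Host *shost,
         *                                   unsigned long elapsed)
         *      {
         *              // done, or give up after 10 seconds
         *              return foo_fw_discovery_done(shost_priv(shost)) ||
         *                     elapsed > 10 * HZ;
         *      }
         */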

        /*
         * Fill in this function to allow the queue depth of this host
         * to be changeable (on a per device basis).  Returns either
         * the current queue depth setting (may be different from what
         * was passed in) or an error.  An error should only be
         * returned if the requested depth is legal but the driver was
         * unable to set it.  If the requested depth is illegal, the
         * driver should set and return the closest legal queue depth.
         *
         * Status: OPTIONAL
         */
        int (*change_queue_depth)(struct scsi_device *, int);
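
        /*
         * Illustrative sketch (assumption; FOO_MAX_QUEUE_DEPTH is a
         * hypothetical hardware limit): most implementations clamp the
         * request and let the midlayer helper do the bookkeeping:
         *
         *      static int foo_change_queue_depth(struct scsi_device *sdev,
         *                                        int depth)
         *      {
         *              if (depth > FOO_MAX_QUEUE_DEPTH)
         *                      depth = FOO_MAX_QUEUE_DEPTH;
         *              return scsi_change_queue_depth(sdev, depth);
         *      }
         */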

        /*
         * This function lets the driver expose the queue mapping
         * to the block layer.
         *
         * Status: OPTIONAL
         */
        int (*map_queues)(struct Scsi_Host *shost);

        /*
         * SCSI interface of blk_poll - poll for IO completions.
         * Only applicable if the SCSI LLD exposes multiple h/w queues.
         *
         * Return value: Number of completed entries found.
         *
         * Status: OPTIONAL
         */
        int (*mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);

        /*
         * Check if scatterlists need to be padded for DMA draining.
         *
         * Status: OPTIONAL
         */
        bool (*dma_need_drain)(struct request *rq);

        /*
         * This function determines the BIOS parameters for a given
         * hard disk.  These tend to be numbers that are made up by
         * the host adapter.  Parameters:
         * size, device, list (heads, sectors, cylinders)
         *
         * Status: OPTIONAL
         */
        int (*bios_param)(struct scsi_device *, struct block_device *,
                        sector_t, int []);
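
        /*
         * Illustrative sketch (assumption, hypothetical foo_* name): a
         * commonly made-up geometry is 64 heads and 32 sectors, i.e.
         * 2048 sectors per cylinder:
         *
         *      static int foo_bios_param(struct scsi_device *sdev,
         *                                struct block_device *bdev,
         *                                sector_t capacity, int geom[])
         *      {
         *              geom[0] = 64;                   // heads
         *              geom[1] = 32;                   // sectors
         *              geom[2] = capacity >> 11;       // cylinders
         *              return 0;
         *      }
         */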

        /*
         * This function is called when one or more partitions on the
         * device reach beyond the end of the device.
         *
         * Status: OPTIONAL
         */
        void (*unlock_native_capacity)(struct scsi_device *);

        /*
         * Can be used to export driver statistics and other information to
         * the world outside the kernel, i.e. userspace, and it also provides
         * an interface to feed the driver with information.
         *
         * Status: OBSOLETE
         */
        int (*show_info)(struct seq_file *, struct Scsi_Host *);
        int (*write_info)(struct Scsi_Host *, char *, int);

        /*
         * This is an optional routine that allows the transport to become
         * involved when a scsi io timer fires.  The return value tells the
         * timer routine how to finish the io timeout handling.
         *
         * Status: OPTIONAL
         */
        enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

        /*
         * Optional routine that allows the transport to decide if a cmd
         * is retryable.  Return true if the transport is in a state the
         * cmd should be retried on.
         */
        bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);

        /*
         * This is an optional routine that allows the transport to initiate
         * an LLD adapter or firmware reset using a sysfs attribute.
         *
         * Return values: 0 on success, -ve value on failure.
         *
         * Status: OPTIONAL
         */
        int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET      1
#define SCSI_FIRMWARE_RESET     2
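
        /*
         * Illustrative sketch (assumption, hypothetical foo_* names)
         * dispatching on the two reset types above:
         *
         *      static int foo_host_reset(struct Scsi_Host *shost, int reset_type)
         *      {
         *              struct foo_host *fh = shost_priv(shost);
         *
         *              switch (reset_type) {
         *              case SCSI_ADAPTER_RESET:
         *                      return foo_hw_reset(fh);
         *              case SCSI_FIRMWARE_RESET:
         *                      return foo_fw_reset(fh);
         *              default:
         *                      return -EOPNOTSUPP;
         *              }
         *      }
         */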

        /*
         * Name of proc directory
         */
        const char *proc_name;

        /*
         * Used to store the procfs directory if a driver implements the
         * show_info method.
         */
        struct proc_dir_entry *proc_dir;

        /*
         * This determines if we will use a non-interrupt driven
         * or an interrupt driven scheme.  It is set to the maximum number
         * of simultaneous commands a single hw queue in the HBA will accept.
         */
        int can_queue;

        /*
         * In many instances, especially where disconnect / reconnect are
         * supported, our host also has an ID on the SCSI bus.  If this is
         * the case, then it must be reserved.  Please set this_id to -1 if
         * your setup is in single initiator mode, and the host lacks an
         * ID.
         */
        int this_id;

        /*
         * This determines the degree to which the host adapter is capable
         * of scatter-gather.
         */
        unsigned short sg_tablesize;
        unsigned short sg_prot_tablesize;

        /*
         * Set this if the host adapter has limitations besides the segment
         * count.
         */
        unsigned int max_sectors;

        /*
         * Maximum size in bytes of a single segment.
         */
        unsigned int max_segment_size;

        /*
         * DMA scatter gather segment boundary limit. A segment crossing this
         * boundary will be split in two.
         */
        unsigned long dma_boundary;

        unsigned long virt_boundary_mask;

        /*
         * This specifies "machine infinity" for host templates which don't
         * limit the transfer size.  Note this limit represents an absolute
         * maximum, and may be over the transfer limits allowed for
         * individual devices (e.g. 256 for SCSI-1).
         */
#define SCSI_DEFAULT_MAX_SECTORS        1024

        /*
         * True if this host adapter can make good use of linked commands.
         * This will allow more than one command to be queued to a given
         * unit on a given host.  Set this to the maximum number of command
         * blocks to be provided for each device.  Set this to 1 for one
         * command block per lun, 2 for two, etc.  Do not set this to 0.
         * You should make sure that the host adapter will do the right thing
         * before you try setting this above 1.
         */
        short cmd_per_lun;

        /*
         * present contains a counter indicating how many boards of this
         * type were found when we did the scan.
         */
        unsigned char present;

        /*
         * If the block layer is used to manage tags, this is the tag
         * allocation policy.
         */
        int tag_alloc_policy;

        /*
         * Track QUEUE_FULL events and reduce queue depth on demand.
         */
        unsigned track_queue_depth:1;

        /*
         * This specifies the mode that a LLD supports.
         */
        unsigned supported_mode:2;

        /*
         * True for emulated SCSI host adapters (e.g. ATAPI).
         */
        unsigned emulated:1;

        /*
         * True if the low-level driver performs its own reset-settle delays.
         */
        unsigned skip_settle_delay:1;

        /* True if the controller does not support WRITE SAME */
        unsigned no_write_same:1;

        /* True if the host uses host-wide tagspace */
        unsigned host_tagset:1;

        /*
         * Countdown for host blocking with no commands outstanding.
         */
        unsigned int max_host_blocked;

        /*
         * Default value for the blocking.  If the queue is empty,
         * host_blocked counts down in the request_fn until it reaches
         * zero, at which point host operations restart.
         *
         * FIXME: This should probably be a value in the template
         */
#define SCSI_DEFAULT_HOST_BLOCKED       7

        /*
         * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
         */
        const struct attribute_group **shost_groups;

        /*
         * Pointer to the SCSI device attribute groups for this host,
         * NULL terminated.
         */
        const struct attribute_group **sdev_groups;

        /*
         * Vendor Identifier associated with the host
         *
         * Note: When specifying vendor_id, be sure to read the
         *   Vendor Type and ID formatting requirements specified in
         *   scsi_netlink.h
         */
        u64 vendor_id;

        struct scsi_host_cmd_pool *cmd_pool;

        /* Delay for runtime autosuspend */
        int rpm_autosuspend_delay;
};
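
/*
 * Illustrative sketch (not part of this header): a minimal template for a
 * hypothetical "foo" driver wiring up the hooks documented above.  The
 * foo_* functions, struct foo_cmd, and the limits are assumptions.
 *
 *	static struct scsi_host_template foo_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "foo",
 *		.proc_name		= "foo",
 *		.queuecommand		= foo_queuecommand,
 *		.eh_abort_handler	= foo_eh_abort,
 *		.slave_alloc		= foo_slave_alloc,
 *		.slave_configure	= foo_slave_configure,
 *		.slave_destroy		= foo_slave_destroy,
 *		.change_queue_depth	= foo_change_queue_depth,
 *		.can_queue		= 64,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= 1024,
 *		.cmd_per_lun		= 8,
 *		.cmd_size		= sizeof(struct foo_cmd),
 *	};
 */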

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 */
#define DEF_SCSI_QCMD(func_name) \
        int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)   \
        {                                                               \
                unsigned long irq_flags;                                \
                int rc;                                                 \
                spin_lock_irqsave(shost->host_lock, irq_flags);         \
                rc = func_name##_lck(cmd);                              \
                spin_unlock_irqrestore(shost->host_lock, irq_flags);    \
                return rc;                                              \
        }
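
/*
 * Illustrative usage sketch (assumption, hypothetical foo_* names): define
 * the locked variant with an _lck suffix and let the macro emit the wrapper
 * that takes the host lock around it:
 *
 *	static int foo_queuecommand_lck(struct scsi_cmnd *cmd)
 *	{
 *		// runs with shost->host_lock held and interrupts disabled
 *		return foo_hw_submit(cmd) ? SCSI_MLQUEUE_HOST_BUSY : 0;
 *	}
 *
 *	static DEF_SCSI_QCMD(foo_queuecommand)
 */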

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
        SHOST_CREATED = 1,
        SHOST_RUNNING,
        SHOST_CANCEL,
        SHOST_DEL,
        SHOST_RECOVERY,
        SHOST_CANCEL_RECOVERY,
        SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
        /*
         * __devices is protected by the host_lock, but you should
         * usually use scsi_device_lookup / shost_for_each_device
         * to access it and don't care about locking yourself.
         * In the rare case of being in irq context you can use
         * their __ prefixed variants with the lock held. NEVER
         * access this list directly from a driver.
         */
        struct list_head        __devices;
        struct list_head        __targets;

        struct list_head        starved_list;

        spinlock_t              default_lock;
        spinlock_t              *host_lock;

        struct mutex            scan_mutex;     /* serialize scanning activity */

        struct list_head        eh_abort_list;
        struct list_head        eh_cmd_q;
        struct task_struct      *ehandler;      /* Error recovery thread. */
        struct completion       *eh_action;     /* Wait for specific actions
                                                   on the host. */
        wait_queue_head_t       host_wait;
        struct scsi_host_template *hostt;
        struct scsi_transport_template *transportt;

        /* Area to keep a shared tag map */
        struct blk_mq_tag_set   tag_set;

        atomic_t host_blocked;

        unsigned int host_failed;          /* commands that failed.
                                              protected by host_lock */
        unsigned int host_eh_scheduled;    /* EH scheduled without command */

        unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

        /* These two fields are used to bound the time spent in error handling. */
        int eh_deadline;
        unsigned long last_reset;

        /*
         * These three parameters can be used to allow for wide scsi,
         * and for host adapters that support multiple busses.
         * The last two should be set to 1 more than the actual max id
         * or lun (e.g. 8 for SCSI parallel systems).
         */
        unsigned int max_channel;
        unsigned int max_id;
        u64 max_lun;

        /*
         * This is a unique identifier that must be assigned so that we
         * have some way of identifying each detected host adapter properly
         * and uniquely.  For hosts that do not support more than one card
         * in the system at one time, this does not need to be set.  It is
         * initialized to 0 in scsi_register.
         */
        unsigned int unique_id;

        /*
         * The maximum length of SCSI commands that this host can accept.
         * Probably 12 for most host adapters, but could be 16 for others,
         * or 260 if the driver supports variable-length CDBs.
         * For drivers that don't set this field, a value of 12 is
         * assumed.
         */
        unsigned short max_cmd_len;

        int this_id;
        int can_queue;
        short cmd_per_lun;
        short unsigned int sg_tablesize;
        short unsigned int sg_prot_tablesize;
        unsigned int max_sectors;
        unsigned int max_segment_size;
        unsigned long dma_boundary;
        unsigned long virt_boundary_mask;

        /*
         * In scsi-mq mode, the number of hardware queues supported by the LLD.
         *
         * Note: it is assumed that each hardware queue has a queue depth of
         * can_queue. In other words, the total queue depth per host
         * is nr_hw_queues * can_queue. However, when host_tagset is set,
         * the total queue depth is can_queue.
         */
        unsigned nr_hw_queues;
        unsigned nr_maps;
        unsigned active_mode:2;

        /*
         * Host has requested that no further requests come through for the
         * time being.
         */
        unsigned host_self_blocked:1;

        /*
         * Host uses correct SCSI ordering not PC ordering. The bit is
         * set for the minority of drivers whose authors actually read
         * the spec ;).
         */
        unsigned reverse_ordering:1;

        /* Task mgmt function in progress */
        unsigned tmf_in_progress:1;

        /* Asynchronous scan in progress */
        unsigned async_scan:1;

        /* Don't resume host in EH */
        unsigned eh_noresume:1;

        /* The controller does not support WRITE SAME */
        unsigned no_write_same:1;

        /* True if the host uses host-wide tagspace */
        unsigned host_tagset:1;

        /* Host responded with short (<36 bytes) INQUIRY result */
        unsigned short_inquiry:1;

        /* The transport requires the LUN bits NOT to be stored in CDB[1] */
        unsigned no_scsi2_lun_in_cdb:1;

        /*
         * Optional work queue to be utilized by the transport
         */
        char work_q_name[20];
        struct workqueue_struct *work_q;

        /*
         * Task management function work queue
         */
        struct workqueue_struct *tmf_work_q;

        /*
         * Value host_blocked counts down from
         */
        unsigned int max_host_blocked;

        /* Protection Information */
        unsigned int prot_capabilities;
        unsigned char prot_guard_type;

        /* legacy crap */
        unsigned long base;
        unsigned long io_port;
        unsigned char n_io_port;
        unsigned char dma_channel;
        unsigned int  irq;

        enum scsi_host_state shost_state;

        /* ldm bits */
        struct device           shost_gendev, shost_dev;

        /*
         * The array size 3 provides space for one attribute group defined by
         * the SCSI core, one attribute group defined by the SCSI LLD and one
         * terminating NULL pointer.
         */
        const struct attribute_group *shost_dev_attr_groups[3];

        /*
         * Points to the transport data (if any) which is allocated
         * separately
         */
        void *shost_data;

        /*
         * Points to the physical bus device we'd use to do DMA
         * Needed just in case we have virtual hosts.
         */
        struct device *dma_dev;

        /*
         * We should ensure that this is aligned, both for better performance
         * and also because some compilers (m68k) don't automatically force
         * alignment to a long boundary.
         */
        unsigned long hostdata[]        /* Used for storage of host specific stuff */
                __attribute__ ((aligned (sizeof(unsigned long))));
};

#define class_to_shost(d)       \
        container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)  \
        dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
        return (void *)shost->hostdata;
}
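
/*
 * Illustrative sketch (assumption, hypothetical foo_* names): hostdata[]
 * holds the LLD-private area sized by the second argument of
 * scsi_host_alloc(), so drivers pair the two like this:
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_host));
 *	if (!shost)
 *		return -ENOMEM;
 *	fh = shost_priv(shost);		// points into shost->hostdata[]
 */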

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
        while (!scsi_is_host_device(dev)) {
                if (!dev->parent)
                        return NULL;
                dev = dev->parent;
        }
        return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RECOVERY ||
                shost->shost_state == SHOST_CANCEL_RECOVERY ||
                shost->shost_state == SHOST_DEL_RECOVERY ||
                shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
                                               struct device *,
                                               struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
                                            enum scsi_host_status status);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
                                             struct device *dev)
{
        return scsi_add_host_with_dma(host, dev, dev);
}
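
/*
 * Illustrative probe/remove flow (assumption, hypothetical "foo" PCI
 * driver) tying the allocation, registration and scan calls together:
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_host));
 *	if (!shost)
 *		return -ENOMEM;
 *	err = scsi_add_host(shost, &pdev->dev);
 *	if (err) {
 *		scsi_host_put(shost);
 *		return err;
 *	}
 *	scsi_scan_host(shost);
 *
 * and on removal:
 *
 *	scsi_remove_host(shost);
 *	scsi_host_put(shost);
 */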

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
        return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:      Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RUNNING ||
               shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
extern int scsi_host_block(struct Scsi_Host *shost);
extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);

void scsi_host_busy_iter(struct Scsi_Host *,
                         bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);
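
/*
 * Illustrative sketch (assumption, hypothetical foo_* name): count
 * outstanding commands with scsi_host_busy_iter().  The iterator calls
 * @fn for each busy command; returning true continues the walk.
 *
 *	static bool foo_count_busy(struct scsi_cmnd *scmd, void *priv,
 *				   bool reserved)
 *	{
 *		(*(unsigned int *)priv)++;
 *		return true;
 *	}
 *
 *	unsigned int busy = 0;
 *	scsi_host_busy_iter(shost, foo_count_busy, &busy);
 */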

struct class_container;

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
        SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
        SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
        SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

        SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
        SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
        SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
        SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
        shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
        return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
        return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
        static unsigned char cap[] = { 0,
                                       SHOST_DIF_TYPE1_PROTECTION,
                                       SHOST_DIF_TYPE2_PROTECTION,
                                       SHOST_DIF_TYPE3_PROTECTION };

        if (target_type >= ARRAY_SIZE(cap))
                return 0;

        return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
                                       SHOST_DIX_TYPE1_PROTECTION,
                                       SHOST_DIX_TYPE2_PROTECTION,
                                       SHOST_DIX_TYPE3_PROTECTION };

        if (target_type >= ARRAY_SIZE(cap))
                return 0;

        return shost->prot_capabilities & cap[target_type];
#endif
        return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
        SHOST_DIX_GUARD_CRC = 1 << 0,
        SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
        shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
        return shost->prot_guard_type;
}
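
/*
 * Illustrative sketch (assumption): a DIX-capable driver advertises its
 * protection and checksum support once at host setup time:
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */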

extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */