/* linux/include/scsi/scsi_host.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _SCSI_SCSI_HOST_H
   3#define _SCSI_SCSI_HOST_H
   4
   5#include <linux/device.h>
   6#include <linux/list.h>
   7#include <linux/types.h>
   8#include <linux/workqueue.h>
   9#include <linux/mutex.h>
  10#include <linux/seq_file.h>
  11#include <linux/blk-mq.h>
  12#include <scsi/scsi.h>
  13
  14struct request_queue;
  15struct block_device;
  16struct completion;
  17struct module;
  18struct scsi_cmnd;
  19struct scsi_device;
  20struct scsi_host_cmd_pool;
  21struct scsi_target;
  22struct Scsi_Host;
  23struct scsi_host_cmd_pool;
  24struct scsi_transport_template;
  25struct blk_queue_tags;
  26
  27
  28/*
  29 * The various choices mean:
  30 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
  31 * ALL:  Means that the host adapter module can do scatter-gather,
  32 *       and that there is no limit to the size of the table to which
  33 *       we scatter/gather data.  The value we set here is the maximum
  34 *       single element sglist.  To use chained sglists, the adapter
  35 *       has to set a value beyond ALL (and correctly use the chain
 *       handling API).
  37 * Anything else:  Indicates the maximum number of chains that can be
  38 *       used in one scatter-gather request.
  39 */
  40#define SG_NONE 0
  41#define SG_ALL  SG_CHUNK_SIZE
  42
  43#define MODE_UNKNOWN 0x00
  44#define MODE_INITIATOR 0x01
  45#define MODE_TARGET 0x02
  46
  47#define DISABLE_CLUSTERING 0
  48#define ENABLE_CLUSTERING 1
  49
struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * Used to initialize old-style drivers.  For new-style drivers
	 * just perform all work in your module initialization function.
	 *
	 * Status:  OBSOLETE
	 */
	int (* detect)(struct scsi_host_template *);

	/*
	 * Used as unload callback for hosts with old-style drivers.
	 *
	 * Status: OBSOLETE
	 */
	int (* release)(struct Scsi_Host *);

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);


#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handle 32bit ABI.
	 * When unknown ioctl is passed return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver finished
	 * processing the command the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the HBA has accepted the
	 * command.  The done() function must be called on the command
	 * when the driver has finished with it. (you may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand).
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine. When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_target_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation:  If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or * possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory
	 * each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_change_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, eg.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around, clean
	 *     up after yourself before returning non-0
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will be called periodically
	 * until it returns 1 with the scsi_host and the elapsed time of
	 * the scan in jiffies.
	 *
	 * Status: OPTIONAL
	 */
	int (* scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (* scan_start)(struct Scsi_Host *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_depth)(struct scsi_device *, int);

	/*
	 * This function lets the driver expose the queue mapping
	 * to the block layer.
	 *
	 * Status: OPTIONAL
	 */
	int (* map_queues)(struct Scsi_Host *shost);

	/*
	 * This function determines the BIOS parameters for a given
	 * harddisk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other infos to the
	 * world outside the kernel ie. userspace and it also provides an
	 * interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires. The return value tells the
	 * timer routine how to finish the io timeout handling:
	 * EH_HANDLED:		I fixed the error, please complete the command
	 * EH_RESET_TIMER:	I need more time, reset the timer and
	 *			begin counting again
	 * EH_NOT_HANDLED	Begin normal error recovery
	 *
	 * Status: OPTIONAL
	 */
	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

	/* This is an optional routine that allows transport to initiate
	 * LLD adapter or firmware reset using sysfs attribute.
	 *
	 * Return values: 0 on success, -ve value on failure.
	 *
	 * Status: OPTIONAL
	 */

	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET	1
#define SCSI_FIRMWARE_RESET	2


	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * show_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a given host adapter will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations beside segment count.
	 */
	unsigned int max_sectors;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/* If use block layer to manage tags, this is tag allocation policy */
	int tag_alloc_policy;

	/*
	 * Track QUEUE_FULL events and reduce queue depth on demand.
	 */
	unsigned track_queue_depth:1;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True if this host adapter can make good use of clustering.
	 * I originally thought that if the tablesize was large that it
	 * was a waste of CPU cycles to prepare a cluster list, but
	 * it works out that the Buslogic is faster if you use a smaller
	 * number of segments (i.e. use clustering).  I guess it is
	 * inefficient.
	 */
	unsigned use_clustering:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the low-level driver supports blk-mq only */
	unsigned force_blk_mq:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the sysfs class properties for this host, NULL terminated.
	 */
	struct device_attribute **shost_attrs;

	/*
	 * Pointer to the SCSI device properties for this host, NULL terminated.
	 */
	struct device_attribute **sdev_attrs;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head legacy_hosts;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 *   Vendor Type and ID formatting requirements specified in
	 *   scsi_netlink.h
	 */
	u64 vendor_id;

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;
	struct scsi_host_cmd_pool *cmd_pool;
};
 506
/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 *
 * Expands to a queuecommand entry point that takes the host lock with
 * IRQs disabled, assigns the command a serial number, and forwards to
 * the driver's legacy func_name##_lck(cmd, done) implementation.
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		scsi_cmd_get_serial(shost, cmd);			\
		rc = func_name##_lck (cmd, cmd->scsi_done);		\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}
 524
 525
/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,		/* scanning allowed (see scsi_host_scan_allowed) */
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,		/* in recovery; scanning still allowed */
	SHOST_CANCEL_RECOVERY,	/* counts as recovery in scsi_host_in_recovery() */
	SHOST_DEL_RECOVERY,	/* counts as recovery in scsi_host_in_recovery() */
};
 540
struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;

	struct mutex		scan_mutex;/* serialize scanning activity */

	struct list_head	eh_cmd_q;
	struct task_struct    * ehandler;  /* Error recovery thread. */
	struct completion     * eh_action; /* Wait for specific actions on the
					      host. */
	wait_queue_head_t       host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/*
	 * Area to keep a shared tag map (if needed, will be
	 * NULL if not).
	 */
	union {
		struct blk_queue_tag	*bqt;
		struct blk_mq_tag_set	tag_set;
	};

	atomic_t host_busy;		   /* commands actually active on low-level */
	atomic_t host_blocked;

	unsigned int host_failed;	   /* commands that failed.
					      protected by host_lock */
	unsigned int host_eh_scheduled;    /* EH scheduled without command */

	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;


	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others.
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	/*
	 * Per-host limits; fields of the same names exist in
	 * struct scsi_host_template above.
	 */
	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned long dma_boundary;
	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue.
	 */
	unsigned nr_hw_queues;
	/*
	 * Used to assign serial numbers to the cmds.
	 * Protected by the host lock.
	 */
	unsigned long cmd_serial_number;

	unsigned active_mode:2;
	unsigned unchecked_isa_dma:1;
	unsigned use_clustering:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	unsigned use_blk_mq:1;
	unsigned use_cmd_list:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[20];
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int  irq;


	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device		shost_gendev, shost_dev;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head sht_legacy_list;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};
 740
/* Map a class device (the embedded shost_dev) back to its Scsi_Host. */
#define		class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

/* printk wrapper that prefixes messages with the host's generic device. */
#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
 746
 747static inline void *shost_priv(struct Scsi_Host *shost)
 748{
 749        return (void *)shost->hostdata;
 750}
 751
 752int scsi_is_host_device(const struct device *);
 753
 754static inline struct Scsi_Host *dev_to_shost(struct device *dev)
 755{
 756        while (!scsi_is_host_device(dev)) {
 757                if (!dev->parent)
 758                        return NULL;
 759                dev = dev->parent;
 760        }
 761        return container_of(dev, struct Scsi_Host, shost_gendev);
 762}
 763
 764static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
 765{
 766        return shost->shost_state == SHOST_RECOVERY ||
 767                shost->shost_state == SHOST_CANCEL_RECOVERY ||
 768                shost->shost_state == SHOST_DEL_RECOVERY ||
 769                shost->tmf_in_progress;
 770}
 771
 772static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
 773{
 774        return shost->use_blk_mq;
 775}
 776
/* Queue work on / flush the host's private workqueue. */
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

/* Host lifetime: allocate, register (with separate DMA device), scan, remove. */
extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
					       struct device *,
					       struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
/* Reference counting and lookup by host number. */
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);
 792
 793static inline int __must_check scsi_add_host(struct Scsi_Host *host,
 794                                             struct device *dev)
 795{
 796        return scsi_add_host_with_dma(host, dev, dev);
 797}
 798
 799static inline struct device *scsi_get_device(struct Scsi_Host *shost)
 800{
 801        return shost->shost_gendev.parent;
 802}
 803
 804/**
 805 * scsi_host_scan_allowed - Is scanning of this host allowed
 806 * @shost:      Pointer to Scsi_Host.
 807 **/
 808static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
 809{
 810        return shost->shost_state == SHOST_RUNNING ||
 811               shost->shost_state == SHOST_RECOVERY;
 812}
 813
/* Pause / resume dispatch of requests to the host. */
extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate it when you are done with it.
 * This pseudo-device isn't real and won't be available from any
 * high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
 828
 829/*
 830 * DIF defines the exchange of protection information between
 831 * initiator and SBC block device.
 832 *
 833 * DIX defines the exchange of protection information between OS and
 834 * initiator.
 835 */
 836enum scsi_host_prot_capabilities {
 837        SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
 838        SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
 839        SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
 840
 841        SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
 842        SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
 843        SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
 844        SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
 845};
 846
 847/*
 848 * SCSI hosts which support the Data Integrity Extensions must
 849 * indicate their capabilities by setting the prot_capabilities using
 850 * this call.
 851 */
 852static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
 853{
 854        shost->prot_capabilities = mask;
 855}
 856
/* Return the host's protection capability mask set via scsi_host_set_prot(). */
static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}
 861
/*
 * Nonzero when any DIX capability bit is set: all SHOST_DIX_* flags are
 * numerically >= SHOST_DIX_TYPE0_PROTECTION (1 << 3) while the DIF-only
 * flags lie below it, so a single comparison suffices.
 */
static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}
 866
 867static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
 868{
 869        static unsigned char cap[] = { 0,
 870                                       SHOST_DIF_TYPE1_PROTECTION,
 871                                       SHOST_DIF_TYPE2_PROTECTION,
 872                                       SHOST_DIF_TYPE3_PROTECTION };
 873
 874        if (target_type >= ARRAY_SIZE(cap))
 875                return 0;
 876
 877        return shost->prot_capabilities & cap[target_type] ? target_type : 0;
 878}
 879
 880static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
 881{
 882#if defined(CONFIG_BLK_DEV_INTEGRITY)
 883        static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
 884                                       SHOST_DIX_TYPE1_PROTECTION,
 885                                       SHOST_DIX_TYPE2_PROTECTION,
 886                                       SHOST_DIX_TYPE3_PROTECTION };
 887
 888        if (target_type >= ARRAY_SIZE(cap))
 889                return 0;
 890
 891        return shost->prot_capabilities & cap[target_type];
 892#endif
 893        return 0;
 894}
 895
 896/*
 897 * All DIX-capable initiators must support the T10-mandated CRC
 898 * checksum.  Controllers can optionally implement the IP checksum
 899 * scheme which has much lower impact on system performance.  Note
 900 * that the main rationale for the checksum is to match integrity
 901 * metadata with data.  Detecting bit errors are a job for ECC memory
 902 * and buses.
 903 */
 904
 905enum scsi_host_guard_type {
 906        SHOST_DIX_GUARD_CRC = 1 << 0,
 907        SHOST_DIX_GUARD_IP  = 1 << 1,
 908};
 909
/* Record the guard tag checksum type(s) (scsi_host_guard_type mask) the host supports. */
static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}
 914
/* Return the guard type mask previously set via scsi_host_set_guard(). */
static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}
 919
/* legacy interfaces — kept for old-style drivers; new drivers should use
 * scsi_host_alloc()/scsi_add_host() above. */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
 924
 925#endif /* _SCSI_SCSI_HOST_H */
 926