linux/include/scsi/scsi_host.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_cmd_pool;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;


#define SG_ALL  SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

struct scsi_host_template {
        /*
         * Put fields referenced in IO submission path together in
         * same cacheline
         */

        /*
         * Additional per-command data allocated for the driver.
         */
        unsigned int cmd_size;

        /*
         * The queuecommand function is used to queue up a scsi
         * command block to the LLDD.  When the driver has finished
         * processing the command the done callback is invoked.
         *
         * If queuecommand returns 0, then the driver has accepted the
         * command.  It must also push it to the HBA if the scsi_cmnd
         * flag SCMD_LAST is set, or if the driver does not implement
         * commit_rqs.  The done() function must be called on the command
         * when the driver has finished with it. (You may call done on the
         * command before queuecommand returns, but in this case you
         * *must* return 0 from queuecommand.)
         *
         * Queuecommand may also reject the command, in which case it may
         * not touch the command and must not call done() for it.
         *
         * There are two possible rejection returns:
         *
         *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
         *   allow commands to other devices serviced by this host.
         *
         *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
         *   host temporarily.
         *
         * For compatibility, any other non-zero return is treated the
         * same as SCSI_MLQUEUE_HOST_BUSY.
         *
         * NOTE: "temporarily" means either until the next command for
         * this device/host completes, or a period of time determined by
         * I/O pressure in the system if there are no other outstanding
         * commands.
         *
         * STATUS: REQUIRED
         */
        int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
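
        /*
         * Illustrative sketch (not part of the original header): a
         * minimal queuecommand for a hypothetical "foo" LLDD.  The
         * foo_hba type and foo_hw_* helpers are assumptions made up
         * for the example.
         *
         *      static int foo_queuecommand(struct Scsi_Host *shost,
         *                                  struct scsi_cmnd *cmd)
         *      {
         *              struct foo_hba *hba = shost_priv(shost);
         *
         *              if (foo_hw_ring_full(hba))
         *                      return SCSI_MLQUEUE_HOST_BUSY;
         *
         *              foo_hw_submit(hba, cmd);  // accepted; done() is
         *                                        // invoked on completion
         *              return 0;
         *      }
         */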

        /*
         * The commit_rqs function is used to trigger a hardware
         * doorbell after some requests have been queued with
         * queuecommand, when an error is encountered before sending
         * the request with SCMD_LAST set.
         *
         * STATUS: OPTIONAL
         */
        void (*commit_rqs)(struct Scsi_Host *, u16);

        struct module *module;
        const char *name;

        /*
         * The info function will return whatever useful information the
         * developer sees fit.  If not provided, then the name field will
         * be used instead.
         *
         * Status: OPTIONAL
         */
        const char *(*info)(struct Scsi_Host *);

        /*
         * Ioctl interface
         *
         * Status: OPTIONAL
         */
        int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
                     void __user *arg);


#ifdef CONFIG_COMPAT
        /*
         * Compat handler. Handles the 32-bit ABI.
         * When an unknown ioctl is passed, return -ENOIOCTLCMD.
         *
         * Status: OPTIONAL
         */
        int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
                            void __user *arg);
#endif

        int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
        int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - there is a default
         * routine that is present that should work in most cases.  For those
         * driver authors that have the inclination and ability to write their
         * own strategy routine, this is where it is specified.  Note - the
         * strategy routine is *ALWAYS* run in the context of the kernel eh
         * thread.  Thus you are guaranteed to *NOT* be in an interrupt
         * handler when you execute this, and you are also guaranteed to
         * *NOT* have any other commands being queued while you are in the
         * strategy routine. When you return from this function, operations
         * return to normal.
         *
         * See scsi_error.c scsi_unjam_host for additional comments about
         * what this function should and should not be attempting to do.
         *
         * Status: REQUIRED     (at least one of them)
         */
        int (* eh_abort_handler)(struct scsi_cmnd *);
        int (* eh_device_reset_handler)(struct scsi_cmnd *);
        int (* eh_target_reset_handler)(struct scsi_cmnd *);
        int (* eh_bus_reset_handler)(struct scsi_cmnd *);
        int (* eh_host_reset_handler)(struct scsi_cmnd *);
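
        /*
         * Illustrative sketch (not in the original header): an abort
         * handler returns SUCCESS if the command was taken back from
         * the hardware and FAILED otherwise, which escalates to the
         * next handler.  foo_hba and foo_hw_abort are hypothetical.
         *
         *      static int foo_eh_abort_handler(struct scsi_cmnd *cmd)
         *      {
         *              struct foo_hba *hba = shost_priv(cmd->device->host);
         *
         *              return foo_hw_abort(hba, cmd) ? SUCCESS : FAILED;
         *      }
         */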

        /*
         * Before the mid layer attempts to scan for a new device where none
         * currently exists, it will call this entry in your driver.  Should
         * your driver need to allocate any structs or perform any other init
         * items in order to send commands to a currently unused target/lun
         * combo, then this is where you can perform those allocations.  This
         * is specifically so that drivers won't have to perform any kind of
         * "is this a new device" checks in their queuecommand routine,
         * thereby making the hot path a bit quicker.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Deallocation:  If we didn't find any devices at this ID, you will
         * get an immediate call to slave_destroy().  If we find something
         * here then you will get a call to slave_configure(), then the
         * device will be used for however long it is kept around, then when
         * the device is removed from the system (or possibly at reboot
         * time), you will then get a call to slave_destroy().  This is
         * assuming you implement slave_configure and slave_destroy.
         * However, if you allocate memory and hang it off the device struct,
         * then you must implement the slave_destroy() routine at a minimum
         * in order to avoid leaking memory each time a device is torn down.
         *
         * Status: OPTIONAL
         */
        int (* slave_alloc)(struct scsi_device *);

        /*
         * Once the device has responded to an INQUIRY and we know the
         * device is online, we call into the low level driver with the
         * struct scsi_device *.  If the low level device driver implements
         * this function, it *must* perform the task of setting the queue
         * depth on the device.  All other tasks are optional and depend
         * on what the driver supports and various implementation details.
         *
         * Things currently recommended to be handled at this time include:
         *
         * 1.  Setting the device queue depth.  Proper setting of this is
         *     described in the comments for scsi_change_queue_depth.
         * 2.  Determining if the device supports the various synchronous
         *     negotiation protocols.  The device struct will already have
         *     responded to INQUIRY and the results of the standard items
         *     will have been shoved into the various device flag bits, eg.
         *     device->sdtr will be true if the device supports SDTR messages.
         * 3.  Allocating command structs that the device will need.
         * 4.  Setting the default timeout on this device (if needed).
         * 5.  Anything else the low level driver might want to do on a device
         *     specific setup basis...
         * 6.  Return 0 on success, non-0 on error.  The device will be marked
         *     as offline on error so that no access will occur.  If you return
         *     non-0, your slave_destroy routine will never get called for this
         *     device, so don't leave any loose memory hanging around; clean
         *     up after yourself before returning non-0.
         *
         * Status: OPTIONAL
         */
        int (* slave_configure)(struct scsi_device *);
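
        /*
         * Illustrative sketch (not in the original header): a typical
         * slave_configure that sets the queue depth and a default
         * timeout.  The depth value and the foo naming are assumptions;
         * scsi_change_queue_depth() and blk_queue_rq_timeout() are the
         * existing helpers.
         *
         *      static int foo_slave_configure(struct scsi_device *sdev)
         *      {
         *              scsi_change_queue_depth(sdev, 64);
         *              blk_queue_rq_timeout(sdev->request_queue, 30 * HZ);
         *              return 0;
         *      }
         */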

        /*
         * Immediately prior to deallocating the device and after all activity
         * has ceased the mid layer calls this point so that the low level
         * driver may completely detach itself from the scsi device and vice
         * versa.  The low level driver is responsible for freeing any memory
         * it allocated in the slave_alloc or slave_configure calls.
         *
         * Status: OPTIONAL
         */
        void (* slave_destroy)(struct scsi_device *);

        /*
         * Before the mid layer attempts to scan for a new device attached
         * to a target where no target currently exists, it will call this
         * entry in your driver.  Should your driver need to allocate any
         * structs or perform any other init items in order to send commands
         * to a currently unused target, then this is where you can perform
         * those allocations.
         *
         * Return values: 0 on success, non-0 on failure
         *
         * Status: OPTIONAL
         */
        int (* target_alloc)(struct scsi_target *);

        /*
         * Immediately prior to deallocating the target structure, and
         * after all activity to attached scsi devices has ceased, the
         * midlayer calls this point so that the driver may deallocate
         * and terminate any references to the target.
         *
         * Status: OPTIONAL
         */
        void (* target_destroy)(struct scsi_target *);

        /*
         * If a host has the ability to discover targets on its own instead
         * of scanning the entire bus, it can fill in this function and
         * call scsi_scan_host().  This function will be called periodically
         * until it returns 1 with the scsi_host and the elapsed time of
         * the scan in jiffies.
         *
         * Status: OPTIONAL
         */
        int (* scan_finished)(struct Scsi_Host *, unsigned long);

        /*
         * If the host wants to be called before the scan starts, but
         * after the midlayer has set up ready for the scan, it can fill
         * in this function.
         *
         * Status: OPTIONAL
         */
        void (* scan_start)(struct Scsi_Host *);

        /*
         * Fill in this function to allow the queue depth of this host
         * to be changeable (on a per device basis).  Returns either
         * the current queue depth setting (may be different from what
         * was passed in) or an error.  An error should only be
         * returned if the requested depth is legal but the driver was
         * unable to set it.  If the requested depth is illegal, the
         * driver should set and return the closest legal queue depth.
         *
         * Status: OPTIONAL
         */
        int (* change_queue_depth)(struct scsi_device *, int);
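
        /*
         * Illustrative note (not in the original header): drivers with
         * no extra bookkeeping commonly wire this hook directly to the
         * midlayer helper, e.g.
         *
         *      .change_queue_depth = scsi_change_queue_depth,
         *
         * since scsi_change_queue_depth() already returns the depth
         * that was actually set.
         */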

        /*
         * This function lets the driver expose the queue mapping
         * to the block layer.
         *
         * Status: OPTIONAL
         */
        int (* map_queues)(struct Scsi_Host *shost);
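
        /*
         * Illustrative sketch (not in the original header): a driver
         * without special affinity requirements can spread the default
         * map across CPUs with the generic block-layer helper:
         *
         *      static int foo_map_queues(struct Scsi_Host *shost)
         *      {
         *              return blk_mq_map_queues(
         *                      &shost->tag_set.map[HCTX_TYPE_DEFAULT]);
         *      }
         *
         * PCI drivers often use blk_mq_pci_map_queues() instead, so
         * that queues follow MSI-X vector affinity.
         */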

        /*
         * SCSI interface of blk_poll - poll for IO completions.
         * Only applicable if SCSI LLD exposes multiple h/w queues.
         *
         * Return value: Number of completed entries found.
         *
         * Status: OPTIONAL
         */
        int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);

        /*
         * Check if scatterlists need to be padded for DMA draining.
         *
         * Status: OPTIONAL
         */
        bool (* dma_need_drain)(struct request *rq);

        /*
         * This function determines the BIOS parameters for a given
         * hard disk.  These tend to be numbers that are made up by
         * the host adapter.  Parameters:
         * size, device, list (heads, sectors, cylinders)
         *
         * Status: OPTIONAL
         */
        int (* bios_param)(struct scsi_device *, struct block_device *,
                        sector_t, int []);

        /*
         * This function is called when one or more partitions on the
         * device reach beyond the end of the device.
         *
         * Status: OPTIONAL
         */
        void (*unlock_native_capacity)(struct scsi_device *);

        /*
         * Can be used to export driver statistics and other infos to the
         * world outside the kernel, i.e. userspace, and it also provides an
         * interface to feed the driver with information.
         *
         * Status: OBSOLETE
         */
        int (*show_info)(struct seq_file *, struct Scsi_Host *);
        int (*write_info)(struct Scsi_Host *, char *, int);

        /*
         * This is an optional routine that allows the transport to become
         * involved when a scsi io timer fires. The return value tells the
         * timer routine how to finish the io timeout handling.
         *
         * Status: OPTIONAL
         */
        enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);

        /*
         * Optional routine that allows the transport to decide if a cmd
         * is retryable. Return true if the transport is in a state the
         * cmd should be retried on.
         */
        bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);

        /*
         * This is an optional routine that allows the transport to initiate
         * an LLD adapter or firmware reset using a sysfs attribute.
         *
         * Return values: 0 on success, -ve value on failure.
         *
         * Status: OPTIONAL
         */
        int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET      1
#define SCSI_FIRMWARE_RESET     2


        /*
         * Name of proc directory
         */
        const char *proc_name;

        /*
         * Used to store the procfs directory if a driver implements the
         * show_info method.
         */
        struct proc_dir_entry *proc_dir;

        /*
         * This determines if we will use a non-interrupt driven
         * or an interrupt driven scheme.  It is set to the maximum number
         * of simultaneous commands a single hw queue in HBA will accept.
         */
        int can_queue;

        /*
         * In many instances, especially where disconnect / reconnect are
         * supported, our host also has an ID on the SCSI bus.  If this is
         * the case, then it must be reserved.  Please set this_id to -1 if
         * your setup is in single initiator mode, and the host lacks an
         * ID.
         */
        int this_id;

        /*
         * This determines the degree to which the host adapter is capable
         * of scatter-gather.
         */
        unsigned short sg_tablesize;
        unsigned short sg_prot_tablesize;

        /*
         * Set this if the host adapter has limitations beside segment count.
         */
        unsigned int max_sectors;

        /*
         * Maximum size in bytes of a single segment.
         */
        unsigned int max_segment_size;

        /*
         * DMA scatter gather segment boundary limit. A segment crossing this
         * boundary will be split in two.
         */
        unsigned long dma_boundary;

        unsigned long virt_boundary_mask;

        /*
         * This specifies "machine infinity" for host templates which don't
         * limit the transfer size.  Note this limit represents an absolute
         * maximum, and may be over the transfer limits allowed for
         * individual devices (e.g. 256 for SCSI-1).
         */
#define SCSI_DEFAULT_MAX_SECTORS        1024

        /*
         * True if this host adapter can make good use of linked commands.
         * This will allow more than one command to be queued to a given
         * unit on a given host.  Set this to the maximum number of command
         * blocks to be provided for each device.  Set this to 1 for one
         * command block per lun, 2 for two, etc.  Do not set this to 0.
         * You should make sure that the host adapter will do the right thing
         * before you try setting this above 1.
         */
        short cmd_per_lun;

        /*
         * present contains a counter indicating how many boards of this
         * type were found when we did the scan.
         */
        unsigned char present;

        /*
         * The tag allocation policy used when the block layer manages tags.
         */
        int tag_alloc_policy;

        /*
         * Track QUEUE_FULL events and reduce queue depth on demand.
         */
        unsigned track_queue_depth:1;

        /*
         * This specifies the mode that a LLD supports.
         */
        unsigned supported_mode:2;

        /*
         * True for emulated SCSI host adapters (e.g. ATAPI).
         */
        unsigned emulated:1;

        /*
         * True if the low-level driver performs its own reset-settle delays.
         */
        unsigned skip_settle_delay:1;

        /* True if the controller does not support WRITE SAME */
        unsigned no_write_same:1;

        /* True if the host uses host-wide tagspace */
        unsigned host_tagset:1;

        /*
         * Countdown for host blocking with no commands outstanding.
         */
        unsigned int max_host_blocked;

        /*
         * Default value for the blocking.  If the queue is empty,
         * host_blocked counts down in the request_fn until it restarts
         * host operations as zero is reached.
         *
         * FIXME: This should probably be a value in the template
         */
#define SCSI_DEFAULT_HOST_BLOCKED       7

        /*
         * Pointer to the sysfs class properties for this host, NULL terminated.
         */
        struct device_attribute **shost_attrs;

        /*
         * Pointer to the SCSI device properties for this host, NULL terminated.
         */
        struct device_attribute **sdev_attrs;

        /*
         * Pointer to the SCSI device attribute groups for this host,
         * NULL terminated.
         */
        const struct attribute_group **sdev_groups;

        /*
         * Vendor Identifier associated with the host
         *
         * Note: When specifying vendor_id, be sure to read the
         *   Vendor Type and ID formatting requirements specified in
         *   scsi_netlink.h
         */
        u64 vendor_id;

        struct scsi_host_cmd_pool *cmd_pool;

        /* Delay for runtime autosuspend */
        int rpm_autosuspend_delay;
};
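
/*
 * Illustrative sketch (not part of the original header): a minimal
 * template for a hypothetical "foo" driver.  The foo_* callbacks and
 * struct foo_cmd_priv are assumptions; the field names are the ones
 * defined above.
 *
 *      static struct scsi_host_template foo_template = {
 *              .module                 = THIS_MODULE,
 *              .name                   = "foo",
 *              .proc_name              = "foo",
 *              .queuecommand           = foo_queuecommand,
 *              .eh_abort_handler       = foo_eh_abort_handler,
 *              .slave_configure        = foo_slave_configure,
 *              .change_queue_depth     = scsi_change_queue_depth,
 *              .can_queue              = 64,
 *              .this_id                = -1,
 *              .sg_tablesize           = SG_ALL,
 *              .max_sectors            = 1024,
 *              .cmd_per_lun            = 8,
 *              .cmd_size               = sizeof(struct foo_cmd_priv),
 *      };
 */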

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 */
#define DEF_SCSI_QCMD(func_name) \
        int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)   \
        {                                                               \
                unsigned long irq_flags;                                \
                int rc;                                                 \
                spin_lock_irqsave(shost->host_lock, irq_flags);         \
                rc = func_name##_lck(cmd, cmd->scsi_done);              \
                spin_unlock_irqrestore(shost->host_lock, irq_flags);    \
                return rc;                                              \
        }
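
/*
 * Illustrative usage (not part of the original header): a legacy
 * driver keeps its locked entry point with an _lck suffix and lets
 * the macro generate the unlocked wrapper.  foo_queuecommand_lck is
 * hypothetical.
 *
 *      static int foo_queuecommand_lck(struct scsi_cmnd *cmd,
 *                                      void (*done)(struct scsi_cmnd *))
 *      {
 *              // runs with the host lock held
 *              ...
 *      }
 *
 *      DEF_SCSI_QCMD(foo_queuecommand)
 *
 * The generated foo_queuecommand() is what goes into the template's
 * .queuecommand slot.
 */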

/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
        SHOST_CREATED = 1,
        SHOST_RUNNING,
        SHOST_CANCEL,
        SHOST_DEL,
        SHOST_RECOVERY,
        SHOST_CANCEL_RECOVERY,
        SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
        /*
         * __devices is protected by the host_lock, but you should
         * usually use scsi_device_lookup / shost_for_each_device
         * to access it and don't care about locking yourself.
         * In the rare case of being in irq context you can use
         * their __ prefixed variants with the lock held. NEVER
         * access this list directly from a driver.
         */
        struct list_head        __devices;
        struct list_head        __targets;

        struct list_head        starved_list;

        spinlock_t              default_lock;
        spinlock_t              *host_lock;

        struct mutex            scan_mutex;     /* serialize scanning activity */

        struct list_head        eh_cmd_q;
        struct task_struct    * ehandler;  /* Error recovery thread. */
        struct completion     * eh_action; /* Wait for specific actions on the
                                              host. */
        wait_queue_head_t       host_wait;
        struct scsi_host_template *hostt;
        struct scsi_transport_template *transportt;

        /* Area to keep a shared tag map */
        struct blk_mq_tag_set   tag_set;

        atomic_t host_blocked;

        unsigned int host_failed;          /* commands that failed.
                                              protected by host_lock */
        unsigned int host_eh_scheduled;    /* EH scheduled without command */

        unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

        /* next two fields are used to bound the time spent in error handling */
        int eh_deadline;
        unsigned long last_reset;


        /*
         * These three parameters can be used to allow for wide scsi,
         * and for host adapters that support multiple busses.
         * The last two should be set to 1 more than the actual max id
         * or lun (e.g. 8 for SCSI parallel systems).
         */
        unsigned int max_channel;
        unsigned int max_id;
        u64 max_lun;

        /*
         * This is a unique identifier that must be assigned so that we
         * have some way of identifying each detected host adapter properly
         * and uniquely.  For hosts that do not support more than one card
         * in the system at one time, this does not need to be set.  It is
         * initialized to 0 in scsi_register.
         */
        unsigned int unique_id;

        /*
         * The maximum length of SCSI commands that this host can accept.
         * Probably 12 for most host adapters, but could be 16 for others,
         * or 260 if the driver supports variable length cdbs.
         * For drivers that don't set this field, a value of 12 is
         * assumed.
         */
        unsigned short max_cmd_len;

        int this_id;
        int can_queue;
        short cmd_per_lun;
        short unsigned int sg_tablesize;
        short unsigned int sg_prot_tablesize;
        unsigned int max_sectors;
        unsigned int max_segment_size;
        unsigned long dma_boundary;
        unsigned long virt_boundary_mask;

        /*
         * In scsi-mq mode, the number of hardware queues supported by the LLD.
         *
         * Note: it is assumed that each hardware queue has a queue depth of
         * can_queue. In other words, the total queue depth per host
         * is nr_hw_queues * can_queue. However, when host_tagset is set,
         * the total queue depth is can_queue.
         */
        unsigned nr_hw_queues;
        unsigned nr_maps;
        unsigned active_mode:2;

        /*
         * Host has requested that no further requests come through for the
         * time being.
         */
        unsigned host_self_blocked:1;

        /*
         * Host uses correct SCSI ordering not PC ordering. The bit is
         * set for the minority of drivers whose authors actually read
         * the spec ;).
         */
        unsigned reverse_ordering:1;

        /* Task mgmt function in progress */
        unsigned tmf_in_progress:1;

        /* Asynchronous scan in progress */
        unsigned async_scan:1;

        /* Don't resume host in EH */
        unsigned eh_noresume:1;

        /* The controller does not support WRITE SAME */
        unsigned no_write_same:1;

        /* True if the host uses host-wide tagspace */
        unsigned host_tagset:1;

        /* Host responded with short (<36 bytes) INQUIRY result */
        unsigned short_inquiry:1;

        /* The transport requires the LUN bits NOT to be stored in CDB[1] */
        unsigned no_scsi2_lun_in_cdb:1;

        /*
         * Optional work queue to be utilized by the transport
         */
        char work_q_name[20];
        struct workqueue_struct *work_q;

        /*
         * Task management function work queue
         */
        struct workqueue_struct *tmf_work_q;

        /*
         * Value host_blocked counts down from
         */
        unsigned int max_host_blocked;

        /* Protection Information */
        unsigned int prot_capabilities;
        unsigned char prot_guard_type;

        /* legacy crap */
        unsigned long base;
        unsigned long io_port;
        unsigned char n_io_port;
        unsigned char dma_channel;
        unsigned int  irq;


        enum scsi_host_state shost_state;

        /* ldm bits */
        struct device           shost_gendev, shost_dev;

        /*
         * Points to the transport data (if any) which is allocated
         * separately
         */
        void *shost_data;

        /*
         * Points to the physical bus device we'd use to do DMA
         * Needed just in case we have virtual hosts.
         */
        struct device *dma_dev;

        /*
         * We should ensure that this is aligned, both for better performance
         * and also because some compilers (m68k) don't automatically force
         * alignment to a long boundary.
         */
        unsigned long hostdata[]  /* Used for storage of host specific stuff */
                __attribute__ ((aligned (sizeof(unsigned long))));
};

#define         class_to_shost(d)       \
        container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)  \
        dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
        return (void *)shost->hostdata;
}
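
/*
 * Illustrative sketch (not part of the original header): per-host
 * private data lives in hostdata[] and is sized at allocation time;
 * shost_priv() then returns a pointer to it.  struct foo_hba is
 * hypothetical.
 *
 *      struct Scsi_Host *shost;
 *      struct foo_hba *hba;
 *
 *      shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *      if (!shost)
 *              return -ENOMEM;
 *      hba = shost_priv(shost);
 */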

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
        while (!scsi_is_host_device(dev)) {
                if (!dev->parent)
                        return NULL;
                dev = dev->parent;
        }
        return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RECOVERY ||
                shost->shost_state == SHOST_CANCEL_RECOVERY ||
                shost->shost_state == SHOST_DEL_RECOVERY ||
                shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
                                               struct device *,
                                               struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
                                            enum scsi_host_status status);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
                                             struct device *dev)
{
        return scsi_add_host_with_dma(host, dev, dev);
}
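
/*
 * Illustrative sketch (not part of the original header): the usual
 * registration sequence in a probe routine.  foo_template and the
 * pdev parent device are assumptions for the example.
 *
 *      shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *      if (!shost)
 *              return -ENOMEM;
 *
 *      err = scsi_add_host(shost, &pdev->dev);
 *      if (err) {
 *              scsi_host_put(shost);
 *              return err;
 *      }
 *      scsi_scan_host(shost);
 *
 * Teardown mirrors this: scsi_remove_host() followed by
 * scsi_host_put().
 */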

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
        return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:      Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
        return shost->shost_state == SHOST_RUNNING ||
               shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
extern int scsi_host_block(struct Scsi_Host *shost);
extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);

void scsi_host_busy_iter(struct Scsi_Host *,
                         bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);

struct class_container;

/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
        SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
        SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
        SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

        SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
        SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
        SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
        SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
        shost->prot_capabilities = mask;
}

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
        return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
        return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
        static unsigned char cap[] = { 0,
                                       SHOST_DIF_TYPE1_PROTECTION,
                                       SHOST_DIF_TYPE2_PROTECTION,
                                       SHOST_DIF_TYPE3_PROTECTION };

        if (target_type >= ARRAY_SIZE(cap))
                return 0;

        return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
                                       SHOST_DIX_TYPE1_PROTECTION,
                                       SHOST_DIX_TYPE2_PROTECTION,
                                       SHOST_DIX_TYPE3_PROTECTION };

        if (target_type >= ARRAY_SIZE(cap))
                return 0;

        return shost->prot_capabilities & cap[target_type];
#endif
        return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme, which has a much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
        SHOST_DIX_GUARD_CRC = 1 << 0,
        SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
        shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
        return shost->prot_guard_type;
}
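
/*
 * Illustrative sketch (not part of the original header): a
 * DIF/DIX-capable driver advertises its protection capabilities and
 * guard type at probe time, before scsi_add_host():
 *
 *      scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *                                SHOST_DIX_TYPE0_PROTECTION |
 *                                SHOST_DIX_TYPE1_PROTECTION);
 *      scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */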

extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */