linux/drivers/nvme/target/nvmet.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * Shifting 1 into bit 16 sets the IATTR field to 1, which indicates that
 * the offending offset lies in the data portion of the Connect command
 * rather than in its SQE.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
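
/*
 * Illustrative sketch (not verbatim from a specific call site): a connect
 * handler rejecting a bad cntlid might report the offending field roughly
 * like this:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *
 * letting the host locate the bad parameter within the connect data.
 */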

struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

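/* Returns NULL for file-backed namespaces, where ns->bdev is not set. */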
static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	unsigned int		max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	u64			serial;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
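	/*
	 * Assumed reading of these flags: NVMF_KEYED_SGLS means the
	 * transport uses keyed SGL data block descriptors (as RDMA does);
	 * NVMF_METADATA_SUPPORTED means it can carry T10-PI metadata.
	 */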
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
};

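/*
 * With 4 KiB pages the defines below cap inline data at 8 * PAGE_SIZE =
 * 32 KiB; requests within that limit (and with few enough segments) can
 * use the inline_bvec array in struct nvmet_req instead of allocating
 * one, see nvmet_use_inline_bvec() below.
 */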
#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes are actually DMA reads for us on the target side,
 * i.e. data flows from the host into our memory, and vice versa for reads.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

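/*
 * AEN masking in brief: after an AEN fires, its bit stays set in
 * ->aen_masked (via nvmet_aen_bit_disabled() below) so the host is not
 * flooded with duplicates; the bit is cleared again only when the host
 * reads the associated log page with the Retain Asynchronous Event (RAE)
 * bit of CDW10 (bit 15) cleared, which nvmet_clear_aen_bit() checks.
 */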
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

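/*
 * Units, for reference: the Identify Controller KAS field is reported in
 * 100 ms units per the NVMe spec, so NVMET_KAS == 10 advertises a keep
 * alive granularity of one second; NVMET_DISC_KATO_MS is the default
 * discovery keep-alive timeout in milliseconds (two minutes).
 */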
#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns);

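/*
 * Worked example: rw.length holds a 0's based block count, so with
 * rw.length == 7 and blksize_shift == 12 (4 KiB blocks) the helper below
 * returns (7 + 1) << 12 = 32768 bytes.
 */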
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

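/*
 * dsm.nr is likewise 0's based: nr == 255 means 256 ranges, i.e.
 * 256 * sizeof(struct nvme_dsm_range) = 4096 bytes of range data.
 */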
static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
{
	return NULL;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline struct nvme_ctrl *
nvmet_req_passthru_ctrl(struct nvmet_req *req)
{
	return nvmet_passthru_ctrl(nvmet_req_subsys(req));
}

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
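/*
 * Examples: to0based(1) == 0 and to0based(4096) == 4095; out-of-range
 * inputs are clamped first, so to0based(0) == 0 and to0based(1 << 20) ==
 * 65535.
 */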
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

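/*
 * Example for the two conversions below: with a 4 KiB LBA format
 * (blksize_shift == 12) the shift is 12 - SECTOR_SHIFT == 3, so LBA 10
 * maps to 512-byte sector 80 and back; the helpers are exact inverses.
 */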
static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

#endif /* _NVMET_H */