linux/block/t10-pi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 *            Information.
 */

#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <net/checksum.h>

typedef __be16 (csum_fn) (void *, unsigned int);

static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
        return cpu_to_be16(crc_t10dif(data, len));
}

static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
        return (__force __be16)ip_compute_csum(data, len);
}
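
/*
 * Note: the CRC flavor above is the guard format T10 mandates on the wire to
 * the target; the IP-checksum flavor is the cheaper guard some controllers
 * accept from the host (DIX) and convert to the T10 CRC themselves.
 */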

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
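/*
 * For reference, the 8-byte tuple this file operates on is declared in
 * <linux/t10-pi.h> roughly as:
 *
 *      struct t10_pi_tuple {
 *              __be16 guard_tag;    (checksum over one data interval)
 *              __be16 app_tag;      (opaque storage for the application)
 *              __be32 ref_tag;      (target LBA for Type 1/2)
 *      };
 */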
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
                csum_fn *fn, enum t10_dif_type type)
{
        unsigned int i;

        for (i = 0 ; i < iter->data_size ; i += iter->interval) {
                struct t10_pi_tuple *pi = iter->prot_buf;

                pi->guard_tag = fn(iter->data_buf, iter->interval);
                pi->app_tag = 0;

                if (type == T10_PI_TYPE1_PROTECTION)
                        pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
                else
                        pi->ref_tag = 0;

                iter->data_buf += iter->interval;
                iter->prot_buf += sizeof(struct t10_pi_tuple);
                iter->seed++;
        }

        return BLK_STS_OK;
}
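
/*
 * Example: for a 4096-byte data buffer with a 512-byte protection interval,
 * t10_pi_generate() emits eight tuples; with Type 1 the ref tags run from
 * seed to seed + 7, while Type 3 leaves every ref tag zero.
 */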

static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
                csum_fn *fn, enum t10_dif_type type)
{
        unsigned int i;

        BUG_ON(type == T10_PI_TYPE0_PROTECTION);

        for (i = 0 ; i < iter->data_size ; i += iter->interval) {
                struct t10_pi_tuple *pi = iter->prot_buf;
                __be16 csum;

                if (type == T10_PI_TYPE1_PROTECTION ||
                    type == T10_PI_TYPE2_PROTECTION) {
                        if (pi->app_tag == T10_PI_APP_ESCAPE)
                                goto next;

                        if (be32_to_cpu(pi->ref_tag) !=
                            lower_32_bits(iter->seed)) {
                                pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
                                       iter->disk_name,
                                       (unsigned long long)iter->seed,
                                       be32_to_cpu(pi->ref_tag));
                                return BLK_STS_PROTECTION;
                        }
                } else if (type == T10_PI_TYPE3_PROTECTION) {
                        if (pi->app_tag == T10_PI_APP_ESCAPE &&
                            pi->ref_tag == T10_PI_REF_ESCAPE)
                                goto next;
                }

                csum = fn(iter->data_buf, iter->interval);

                if (pi->guard_tag != csum) {
                        pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
                               iter->disk_name,
                               (unsigned long long)iter->seed,
                               be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
                        return BLK_STS_PROTECTION;
                }

next:
                iter->data_buf += iter->interval;
                iter->prot_buf += sizeof(struct t10_pi_tuple);
                iter->seed++;
        }

        return BLK_STS_OK;
}
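
/*
 * The escape values checked above are defined in <linux/t10-pi.h> as the
 * all-ones patterns: an app tag of 0xffff disables checking of a Type 1/2
 * tuple, and an app tag of 0xffff together with a ref tag of 0xffffffff
 * disables checking of a Type 3 tuple.
 */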

static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq:              request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer and is the basis for the ref_tag.
 * Due to partitioning, MD/DM cloning, etc. the actual physical start sector
 * is likely to be different. Remap the protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
        const int tuple_sz = rq->q->integrity.tuple_size;
        u32 ref_tag = t10_pi_ref_tag(rq);
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {
                struct bio_integrity_payload *bip = bio_integrity(bio);
                u32 virt = bip_get_seed(bip) & 0xffffffff;
                struct bio_vec iv;
                struct bvec_iter iter;

                /* Already remapped? */
                if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
                        break;

                bip_for_each_vec(iv, bip, iter) {
                        unsigned int j;
                        void *p;

                        p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;

                                if (be32_to_cpu(pi->ref_tag) == virt)
                                        pi->ref_tag = cpu_to_be32(ref_tag);
                                virt++;
                                ref_tag++;
                                p += tuple_sz;
                        }
                        kunmap_local(p);
                }

                bip->bip_flags |= BIP_MAPPED_INTEGRITY;
        }
}
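
/*
 * Example: for a bio submitted at virtual sector 0 of a partition that starts
 * at LBA 2048, the loop above rewrites ref tags 0, 1, 2, ... to 2048, 2049,
 * 2050, ... so they match the physical sectors the device will check.
 */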

/**
 * t10_pi_type1_complete - prepare PI prior to returning request to the block layer
 * @rq:              request with PI that should be prepared
 * @nr_bytes:        total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer and is the basis for the ref_tag.
 * Due to partitioning, MD/DM cloning, etc. the actual physical start sector
 * is likely to be different. Since the physical start sector was submitted
 * to the device, remap it back to the virtual values expected by the
 * block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
        unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
        const int tuple_sz = rq->q->integrity.tuple_size;
        u32 ref_tag = t10_pi_ref_tag(rq);
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {
                struct bio_integrity_payload *bip = bio_integrity(bio);
                u32 virt = bip_get_seed(bip) & 0xffffffff;
                struct bio_vec iv;
                struct bvec_iter iter;

                bip_for_each_vec(iv, bip, iter) {
                        unsigned int j;
                        void *p;

                        p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;

                                if (be32_to_cpu(pi->ref_tag) == ref_tag)
                                        pi->ref_tag = cpu_to_be32(virt);
                                virt++;
                                ref_tag++;
                                intervals--;
                                p += tuple_sz;
                        }
                        kunmap_local(p);
                }
        }
}
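
/*
 * Example: if only the first 4096 bytes of a larger request completed and the
 * protection interval is 512 bytes, nr_bytes limits the loop above to the
 * first eight tuples; the remaining tuples keep their physical ref tags until
 * the rest of the request completes.
 */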

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

const struct blk_integrity_profile t10_pi_type1_crc = {
        .name                   = "T10-DIF-TYPE1-CRC",
        .generate_fn            = t10_pi_type1_generate_crc,
        .verify_fn              = t10_pi_type1_verify_crc,
        .prepare_fn             = t10_pi_type1_prepare,
        .complete_fn            = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
        .name                   = "T10-DIF-TYPE1-IP",
        .generate_fn            = t10_pi_type1_generate_ip,
        .verify_fn              = t10_pi_type1_verify_ip,
        .prepare_fn             = t10_pi_type1_prepare,
        .complete_fn            = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
        .name                   = "T10-DIF-TYPE3-CRC",
        .generate_fn            = t10_pi_type3_generate_crc,
        .verify_fn              = t10_pi_type3_verify_crc,
        .prepare_fn             = t10_pi_type3_prepare,
        .complete_fn            = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
        .name                   = "T10-DIF-TYPE3-IP",
        .generate_fn            = t10_pi_type3_generate_ip,
        .verify_fn              = t10_pi_type3_verify_ip,
        .prepare_fn             = t10_pi_type3_prepare,
        .complete_fn            = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
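
/*
 * A minimal sketch of how a driver could register one of these profiles,
 * assuming the blk_integrity_register() API and a 512-byte protection
 * interval (details vary by driver; see e.g. drivers/scsi/sd_dif.c), where
 * disk is the gendisk being brought up:
 *
 *      struct blk_integrity bi = {
 *              .profile      = &t10_pi_type1_crc,
 *              .tuple_size   = sizeof(struct t10_pi_tuple),
 *              .interval_exp = ilog2(512),
 *              .flags        = BLK_INTEGRITY_GENERATE | BLK_INTEGRITY_VERIFY,
 *      };
 *
 *      blk_integrity_register(disk, &bi);
 */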

MODULE_LICENSE("GPL");