/* linux/drivers/block/rsxx/dev.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: dev.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */
  10
  11#include <linux/kernel.h>
  12#include <linux/interrupt.h>
  13#include <linux/module.h>
  14#include <linux/pci.h>
  15#include <linux/slab.h>
  16
  17#include <linux/hdreg.h>
  18#include <linux/genhd.h>
  19#include <linux/blkdev.h>
  20#include <linux/bio.h>
  21
  22#include <linux/fs.h>
  23
  24#include "rsxx_priv.h"
  25
  26static unsigned int blkdev_minors = 64;
  27module_param(blkdev_minors, uint, 0444);
  28MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");
  29
  30/*
  31 * For now I'm making this tweakable in case any applications hit this limit.
  32 * If you see a "bio too big" error in the log you will need to raise this
  33 * value.
  34 */
  35static unsigned int blkdev_max_hw_sectors = 1024;
  36module_param(blkdev_max_hw_sectors, uint, 0444);
  37MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
  38
  39static unsigned int enable_blkdev = 1;
  40module_param(enable_blkdev , uint, 0444);
  41MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
  42
  43
  44struct rsxx_bio_meta {
  45        struct bio      *bio;
  46        atomic_t        pending_dmas;
  47        atomic_t        error;
  48        unsigned long   start_time;
  49};
  50
  51static struct kmem_cache *bio_meta_pool;
  52
  53static blk_qc_t rsxx_submit_bio(struct bio *bio);
  54
  55/*----------------- Block Device Operations -----------------*/
  56static int rsxx_blkdev_ioctl(struct block_device *bdev,
  57                                 fmode_t mode,
  58                                 unsigned int cmd,
  59                                 unsigned long arg)
  60{
  61        struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
  62
  63        switch (cmd) {
  64        case RSXX_GETREG:
  65                return rsxx_reg_access(card, (void __user *)arg, 1);
  66        case RSXX_SETREG:
  67                return rsxx_reg_access(card, (void __user *)arg, 0);
  68        }
  69
  70        return -ENOTTY;
  71}
  72
  73static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  74{
  75        struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
  76        u64 blocks = card->size8 >> 9;
  77
  78        /*
  79         * get geometry: Fake it. I haven't found any drivers that set
  80         * geo->start, so we won't either.
  81         */
  82        if (card->size8) {
  83                geo->heads = 64;
  84                geo->sectors = 16;
  85                do_div(blocks, (geo->heads * geo->sectors));
  86                geo->cylinders = blocks;
  87        } else {
  88                geo->heads = 0;
  89                geo->sectors = 0;
  90                geo->cylinders = 0;
  91        }
  92        return 0;
  93}
  94
  95static const struct block_device_operations rsxx_fops = {
  96        .owner          = THIS_MODULE,
  97        .submit_bio     = rsxx_submit_bio,
  98        .getgeo         = rsxx_getgeo,
  99        .ioctl          = rsxx_blkdev_ioctl,
 100};
 101
 102static void bio_dma_done_cb(struct rsxx_cardinfo *card,
 103                            void *cb_data,
 104                            unsigned int error)
 105{
 106        struct rsxx_bio_meta *meta = cb_data;
 107
 108        if (error)
 109                atomic_set(&meta->error, 1);
 110
 111        if (atomic_dec_and_test(&meta->pending_dmas)) {
 112                if (!card->eeh_state && card->gendisk)
 113                        bio_end_io_acct(meta->bio, meta->start_time);
 114
 115                if (atomic_read(&meta->error))
 116                        bio_io_error(meta->bio);
 117                else
 118                        bio_endio(meta->bio);
 119                kmem_cache_free(bio_meta_pool, meta);
 120        }
 121}
 122
 123static blk_qc_t rsxx_submit_bio(struct bio *bio)
 124{
 125        struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
 126        struct rsxx_bio_meta *bio_meta;
 127        blk_status_t st = BLK_STS_IOERR;
 128
 129        blk_queue_split(&bio);
 130
 131        might_sleep();
 132
 133        if (!card)
 134                goto req_err;
 135
 136        if (bio_end_sector(bio) > get_capacity(card->gendisk))
 137                goto req_err;
 138
 139        if (unlikely(card->halt))
 140                goto req_err;
 141
 142        if (unlikely(card->dma_fault))
 143                goto req_err;
 144
 145        if (bio->bi_iter.bi_size == 0) {
 146                dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
 147                goto req_err;
 148        }
 149
 150        bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
 151        if (!bio_meta) {
 152                st = BLK_STS_RESOURCE;
 153                goto req_err;
 154        }
 155
 156        bio_meta->bio = bio;
 157        atomic_set(&bio_meta->error, 0);
 158        atomic_set(&bio_meta->pending_dmas, 0);
 159
 160        if (!unlikely(card->halt))
 161                bio_meta->start_time = bio_start_io_acct(bio);
 162
 163        dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
 164                 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
 165                 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
 166
 167        st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
 168                                    bio_dma_done_cb, bio_meta);
 169        if (st)
 170                goto queue_err;
 171
 172        return BLK_QC_T_NONE;
 173
 174queue_err:
 175        kmem_cache_free(bio_meta_pool, bio_meta);
 176req_err:
 177        if (st)
 178                bio->bi_status = st;
 179        bio_endio(bio);
 180        return BLK_QC_T_NONE;
 181}
 182
 183/*----------------- Device Setup -------------------*/
 184static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
 185{
 186        unsigned char pci_rev;
 187
 188        pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
 189
 190        return (pci_rev >= RSXX_DISCARD_SUPPORT);
 191}
 192
 193int rsxx_attach_dev(struct rsxx_cardinfo *card)
 194{
 195        mutex_lock(&card->dev_lock);
 196
 197        /* The block device requires the stripe size from the config. */
 198        if (enable_blkdev) {
 199                if (card->config_valid)
 200                        set_capacity(card->gendisk, card->size8 >> 9);
 201                else
 202                        set_capacity(card->gendisk, 0);
 203                device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
 204                card->bdev_attached = 1;
 205        }
 206
 207        mutex_unlock(&card->dev_lock);
 208
 209        return 0;
 210}
 211
 212void rsxx_detach_dev(struct rsxx_cardinfo *card)
 213{
 214        mutex_lock(&card->dev_lock);
 215
 216        if (card->bdev_attached) {
 217                del_gendisk(card->gendisk);
 218                card->bdev_attached = 0;
 219        }
 220
 221        mutex_unlock(&card->dev_lock);
 222}
 223
 224int rsxx_setup_dev(struct rsxx_cardinfo *card)
 225{
 226        unsigned short blk_size;
 227
 228        mutex_init(&card->dev_lock);
 229
 230        if (!enable_blkdev)
 231                return 0;
 232
 233        card->major = register_blkdev(0, DRIVER_NAME);
 234        if (card->major < 0) {
 235                dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
 236                return -ENOMEM;
 237        }
 238
 239        card->gendisk = blk_alloc_disk(blkdev_minors);
 240        if (!card->gendisk) {
 241                dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
 242                unregister_blkdev(card->major, DRIVER_NAME);
 243                return -ENOMEM;
 244        }
 245
 246        if (card->config_valid) {
 247                blk_size = card->config.data.block_size;
 248                blk_queue_dma_alignment(card->gendisk->queue, blk_size - 1);
 249                blk_queue_logical_block_size(card->gendisk->queue, blk_size);
 250        }
 251
 252        blk_queue_max_hw_sectors(card->gendisk->queue, blkdev_max_hw_sectors);
 253        blk_queue_physical_block_size(card->gendisk->queue, RSXX_HW_BLK_SIZE);
 254
 255        blk_queue_flag_set(QUEUE_FLAG_NONROT, card->gendisk->queue);
 256        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->gendisk->queue);
 257        if (rsxx_discard_supported(card)) {
 258                blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->gendisk->queue);
 259                blk_queue_max_discard_sectors(card->gendisk->queue,
 260                                                RSXX_HW_BLK_SIZE >> 9);
 261                card->gendisk->queue->limits.discard_granularity =
 262                        RSXX_HW_BLK_SIZE;
 263                card->gendisk->queue->limits.discard_alignment =
 264                        RSXX_HW_BLK_SIZE;
 265        }
 266
 267        snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
 268                 "rsxx%d", card->disk_id);
 269        card->gendisk->major = card->major;
 270        card->gendisk->minors = blkdev_minors;
 271        card->gendisk->fops = &rsxx_fops;
 272        card->gendisk->private_data = card;
 273
 274        return 0;
 275}
 276
 277void rsxx_destroy_dev(struct rsxx_cardinfo *card)
 278{
 279        if (!enable_blkdev)
 280                return;
 281
 282        blk_cleanup_disk(card->gendisk);
 283        card->gendisk = NULL;
 284        unregister_blkdev(card->major, DRIVER_NAME);
 285}
 286
 287int rsxx_dev_init(void)
 288{
 289        bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
 290        if (!bio_meta_pool)
 291                return -ENOMEM;
 292
 293        return 0;
 294}
 295
 296void rsxx_dev_cleanup(void)
 297{
 298        kmem_cache_destroy(bio_meta_pool);
 299}
 300
 301
 302