/* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */

#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <scsi/sg.h>
#include "aoe.h"

static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
static struct dentry *aoe_debugfs_dir;

/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
	"When nonzero, set the maximum number of sectors per I/O request");

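/* sysfs "state" attribute: "up" or "down", optionally suffixed with ",kickme" or ",closewait" */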
static ssize_t aoedisk_show_state(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE,
			"%s%s\n",
			(d->flags & DEVFL_UP) ? "up" : "down",
			(d->flags & DEVFL_KICKME) ? ",kickme" :
			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
}
static ssize_t aoedisk_show_mac(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct aoetgt *t = d->targets[0];

	if (t == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
}
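/* list each network interface usable to reach this device, once, comma-separated */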
static ssize_t aoedisk_show_netif(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct net_device *nds[8], **nd, **nnd, **ne;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *e;
	char *p;

	memset(nds, 0, sizeof nds);
	nd = nds;
	ne = nd + ARRAY_SIZE(nds);
	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++) {
		ifp = (*t)->ifs;
		e = ifp + NAOEIFS;
		for (; ifp < e && ifp->nd; ifp++) {
			for (nnd = nds; nnd < nd; nnd++)
				if (*nnd == ifp->nd)
					break;
			if (nnd == nd && nd != ne)
				*nd++ = ifp->nd;
		}
	}

	ne = nd;
	nd = nds;
	if (*nd == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	for (p = page; nd < ne; nd++)
		p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
			p == page ? "" : ",", (*nd)->name);
	p += snprintf(p, PAGE_SIZE - (p-page), "\n");
	return p-page;
}

static ssize_t aoedisk_show_fwver(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}
static ssize_t aoedisk_show_payload(struct device *dev,
				    struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
}

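/* debugfs dump: per-device counters first, then one stanza per target with its interfaces */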
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
{
	struct aoedev *d;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *ife;
	unsigned long flags;
	char c;

	d = s->private;
	seq_printf(s, "rttavg: %d rttdev: %d\n",
		d->rttavg >> RTTSCALE,
		d->rttdev >> RTTDSCALE);
	seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
	seq_printf(s, "kicked: %ld\n", d->kicked);
	seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
	seq_printf(s, "ref: %ld\n", d->ref);

	spin_lock_irqsave(&d->lock, flags);
	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++) {
		c = '\t';
		seq_printf(s, "falloc: %ld\n", (*t)->falloc);
		seq_printf(s, "ffree: %p\n",
			list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
		seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
			(*t)->maxout, (*t)->nframes);
		seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
		seq_printf(s, "\ttaint:%d\n", (*t)->taint);
		seq_printf(s, "\tr:%d\n", (*t)->rpkts);
		seq_printf(s, "\tw:%d\n", (*t)->wpkts);
		ifp = (*t)->ifs;
		ife = ifp + ARRAY_SIZE((*t)->ifs);
		for (; ifp < ife && ifp->nd; ifp++) {	/* bounds check before dereferencing ifp->nd */
			seq_printf(s, "%c%s", c, ifp->nd->name);
			c = ',';
		}
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

static int aoe_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, aoedisk_debugfs_show, inode->i_private);
}

static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, 0444, aoedisk_show_netif, NULL);
static struct device_attribute dev_attr_firmware_version = {
	.attr = { .name = "firmware-version", .mode = 0444 },
	.show = aoedisk_show_fwver,
};
static DEVICE_ATTR(payload, 0444, aoedisk_show_payload, NULL);

static struct attribute *aoe_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_mac.attr,
	&dev_attr_netif.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_payload.attr,
	NULL,
};

static const struct attribute_group aoe_attr_group = {
	.attrs = aoe_attrs,
};

static const struct attribute_group *aoe_attr_groups[] = {
	&aoe_attr_group,
	NULL,
};

static const struct file_operations aoe_debugfs_fops = {
	.open = aoe_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

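/* create a read-only debugfs file for this device, named after the disk (the part after any '/') */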
static void
aoedisk_add_debugfs(struct aoedev *d)
{
	struct dentry *entry;
	char *p;

	if (aoe_debugfs_dir == NULL)
		return;
	p = strchr(d->gd->disk_name, '/');
	if (p == NULL)
		p = d->gd->disk_name;
	else
		p++;
	BUG_ON(*p == '\0');
	entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
				    &aoe_debugfs_fops);
	if (IS_ERR_OR_NULL(entry)) {
		pr_info("aoe: cannot create debugfs file for %s\n",
			d->gd->disk_name);
		return;
	}
	BUG_ON(d->debugfs);
	d->debugfs = entry;
}
void
aoedisk_rm_debugfs(struct aoedev *d)
{
	debugfs_remove(d->debugfs);
	d->debugfs = NULL;
}

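/* opens succeed only while the device is up and not being torn down */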
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
	struct aoedev *d = bdev->bd_disk->private_data;
	ulong flags;

	if (!virt_addr_valid(d)) {
		pr_crit("aoe: invalid device pointer in %s\n",
			__func__);
		WARN_ON(1);
		return -ENODEV;
	}
	if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
		return -ENODEV;

	mutex_lock(&aoeblk_mutex);
	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
		d->nopen++;
		spin_unlock_irqrestore(&d->lock, flags);
		mutex_unlock(&aoeblk_mutex);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	mutex_unlock(&aoeblk_mutex);
	return -ENODEV;
}

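/* the last close sends an AoE config query for this device's major.minor address */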
static void
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
	struct aoedev *d = disk->private_data;
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);

	if (--d->nopen == 0) {
		spin_unlock_irqrestore(&d->lock, flags);
		aoecmd_cfg(d->aoemajor, d->aoeminor);
		return;
	}
	spin_unlock_irqrestore(&d->lock, flags);
}

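/* blk-mq queue_rq: append the request to the per-device list and kick aoecmd_work to service it */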
static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct aoedev *d = hctx->queue->queuedata;

	spin_lock_irq(&d->lock);

	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irq(&d->lock);
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	list_add_tail(&bd->rq->queuelist, &d->rq_list);
	aoecmd_work(d);
	spin_unlock_irq(&d->lock);
	return BLK_STS_OK;
}

static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct aoedev *d = bdev->bd_disk->private_data;

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_ERR "aoe: disk not up\n");
		return -ENODEV;
	}

	geo->cylinders = d->geo.cylinders;
	geo->heads = d->geo.heads;
	geo->sectors = d->geo.sectors;
	return 0;
}

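/* only HDIO_GET_IDENTITY is handled here, returning the cached identify data; other ioctls fail */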
static int
aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
{
	struct aoedev *d;

	if (!arg)
		return -EINVAL;

	d = bdev->bd_disk->private_data;
	if ((d->flags & DEVFL_UP) == 0) {
		pr_err("aoe: disk not up\n");
		return -ENODEV;
	}

	if (cmd == HDIO_GET_IDENTITY) {
		if (!copy_to_user((void __user *) arg, &d->ident,
			sizeof(d->ident)))
			return 0;
		return -EFAULT;
	}

	/* udev calls scsi_id, which uses SG_IO, resulting in noise */
	if (cmd != SG_IO)
		pr_info("aoe: unknown ioctl 0x%x\n", cmd);

	return -ENOTTY;
}

static const struct block_device_operations aoe_bdops = {
	.open = aoeblk_open,
	.release = aoeblk_release,
	.ioctl = aoeblk_ioctl,
	.getgeo = aoeblk_getgeo,
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops aoeblk_mq_ops = {
	.queue_rq	= aoeblk_queue_rq,
};

/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	mempool_t *mp;
	struct request_queue *q;
	struct blk_mq_tag_set *set;
	enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
	ulong flags;
	int late = 0;
	int err;

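	/* claim the allocation with DEVFL_GD_NOW; bail out if another gdalloc is
	 * already in flight, the request was dropped, or the device is being torn down
	 */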
	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_GDALLOC
	&& !(d->flags & DEVFL_TKILL)
	&& !(d->flags & DEVFL_GD_NOW))
		d->flags |= DEVFL_GD_NOW;
	else
		late = 1;
	spin_unlock_irqrestore(&d->lock, flags);
	if (late)
		return;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
		buf_pool_cache);
	if (mp == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}

	set = &d->tag_set;
	set->ops = &aoeblk_mq_ops;
	set->cmd_size = sizeof(struct aoe_req);
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	err = blk_mq_alloc_tag_set(set);
	if (err) {
		pr_err("aoe: cannot allocate tag set for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_mempool;
	}

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		pr_err("aoe: cannot allocate block queue for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		blk_mq_free_tag_set(set);
		goto err_mempool;
	}

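	/* publish the queue, mempool, and gendisk under the device lock, then mark the device up */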
	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	WARN_ON(!(d->flags & DEVFL_GDALLOC));
	WARN_ON(d->flags & DEVFL_TKILL);
	WARN_ON(d->gd);
	WARN_ON(d->flags & DEVFL_UP);
	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
	q->backing_dev_info->name = "aoe";
	q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
	d->bufpool = mp;
	d->blkq = gd->queue = q;
	q->queuedata = d;
	d->gd = gd;
	if (aoe_maxsectors)
		blk_queue_max_hw_sectors(q, aoe_maxsectors);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	device_add_disk(NULL, gd, aoe_attr_groups);
	aoedisk_add_debugfs(d);

	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	d->flags &= ~DEVFL_GD_NOW;
	spin_unlock_irqrestore(&d->lock, flags);
	return;

err_mempool:
	mempool_destroy(mp);
err_disk:
	put_disk(gd);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GD_NOW;
	schedule_work(&d->work);
	spin_unlock_irqrestore(&d->lock, flags);
}

void
aoeblk_exit(void)
{
	debugfs_remove_recursive(aoe_debugfs_dir);
	aoe_debugfs_dir = NULL;
	kmem_cache_destroy(buf_pool_cache);
}

int __init
aoeblk_init(void)
{
	buf_pool_cache = kmem_cache_create("aoe_bufs",
					   sizeof(struct buf),
					   0, 0, NULL);
	if (buf_pool_cache == NULL)
		return -ENOMEM;
	aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
	if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
		pr_info("aoe: cannot create debugfs directory\n");
		aoe_debugfs_dir = NULL;
	}
	return 0;
}