/*
 * Interface to Linux block layer for MTD 'translation layers'
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

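/*
 * Last reference to a translation device is gone: tear down the request
 * queue and gendisk, unlink the device from its translator's list and
 * free it. Invoked via kref_put().
 */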
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}

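/*
 * Translate a single block layer request into calls to the translation
 * layer's readsect/writesect/discard/flush hooks. Runs from the
 * per-device workqueue with dev->lock held.
 */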
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	if (req_op(req) == REQ_OP_FLUSH) {
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->readsect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		rq_flush_dcache_pages(req);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		if (!tr->writesect)
			return BLK_STS_IOERR;

		rq_flush_dcache_pages(req);
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->writesect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

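/*
 * Workqueue handler: drain the legacy request queue, handing each
 * request to do_blktrans_request(). While the queue is idle, give the
 * translation layer a chance to run its background() hook.
 */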
static void mtd_blktrans_work(struct work_struct *work)
{
	struct mtd_blktrans_dev *dev =
		container_of(work, struct mtd_blktrans_dev, work);
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;
	int background_done = 0;

	spin_lock_irq(rq->queue_lock);

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = blk_fetch_request(rq))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(rq->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(rq->queue_lock);
				/*
				 * Do background processing just once per
				 * idle period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;

		background_done = 0;
	}

	spin_unlock_irq(rq->queue_lock);
}

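/*
 * Legacy request_fn. Called with the queue lock held; the real work is
 * deferred to the per-device workqueue. When no backing device is left
 * (queuedata is cleared on removal), fail everything immediately.
 */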
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, BLK_STS_IOERR);
	else
		queue_work(dev->wq, &dev->work);
}

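/*
 * The open/release/ioctl/getgeo handlers below pin the translation
 * device with blktrans_dev_get() so it cannot go away while the block
 * device node is in use.
 */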
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS;

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;
}

static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

	if (!dev)
		return;

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;
	default:
		ret = -ENOTTY;
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

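/*
 * add_mtd_blktrans_dev - register one translated block device
 *
 * Allocates a device number, gendisk, request queue and workqueue for
 * @new and makes the disk visible to the block layer. Must be called
 * with mtd_table_mutex held, typically from a translator's add_mtd()
 * hook.
 */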
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_block_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	if (tr->flush)
		blk_queue_write_cache(new->rq, true, false);

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
	}

	gd->queue = new->rq;

	/* Create processing workqueue */
	new->wq = alloc_workqueue("%s%d", 0, 0,
				  tr->name, new->mtd->index);
	if (!new->wq)
		goto error4;
	INIT_WORK(&new->work, mtd_blktrans_work);

	if (new->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(&new->mtd->dev, gd);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;
error4:
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	return ret;
}

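/*
 * del_mtd_blktrans_dev - remove a translated block device
 *
 * Unregisters the gendisk, flushes the workqueue, fails any requests
 * still queued and drops the initial reference. Must be called with
 * mtd_table_mutex held.
 */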
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Stop workqueue. This will perform any pending request. */
	destroy_workqueue(old->wq);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* If the device is currently open, tell the trans driver to close it,
	   then put the mtd device, and don't touch it again. */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

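/*
 * MTD notifier hooks: create or tear down translated block devices as
 * raw MTD devices come and go.
 */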
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

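/*
 * register_mtd_blktrans - register a translation layer
 *
 * Registers the block major for @tr and creates a translated device for
 * every MTD device already present. A major of 0 requests dynamic
 * allocation; the allocated number is written back to tr->major.
 */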
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from tripping
	   us up. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	if (ret)
		tr->major = ret;

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}

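/*
 * deregister_mtd_blktrans - remove a translation layer
 *
 * Removes every device belonging to @tr and releases its block major.
 */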
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone is currently inside
	   register_mtd_blktrans() we are broken anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");